diff --git a/.changes/2.859.0.json b/.changes/2.859.0.json new file mode 100644 index 0000000000..c486a1e6f3 --- /dev/null +++ b/.changes/2.859.0.json @@ -0,0 +1,17 @@ +[ + { + "type": "feature", + "category": "EMR", + "description": "Amazon EMR customers can now specify how EC2 On-Demand Capacity Reservations are used in their EMR clusters with instance fleets using allocation strategy." + }, + { + "type": "feature", + "category": "KinesisVideoArchivedMedia", + "description": "Increase the maximum HLS and MPEG-DASH manifest size from 1,000 to 5,000 fragments." + }, + { + "type": "feature", + "category": "Lambda", + "description": "Documentation updates for Lambda. Constraint updates to AddLayerVersionPermission's Action and OrganizationId parameters, and AddPermission's Principal and SourceAccount parameters." + } +] \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 65b3e7a415..2be447ff59 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,12 @@ # Changelog for AWS SDK for JavaScript - + +## 2.859.0 +* feature: EMR: Amazon EMR customers can now specify how EC2 On-Demand Capacity Reservations are used in their EMR clusters with instance fleets using allocation strategy. +* feature: KinesisVideoArchivedMedia: Increase the maximum HLS and MPEG-DASH manifest size from 1,000 to 5,000 fragments. +* feature: Lambda: Documentation updates for Lambda. Constraint updates to AddLayerVersionPermission's Action and OrganizationId parameters, and AddPermission's Principal and SourceAccount parameters. + ## 2.858.0 * feature: Athena: Adds APIs to create, list, update, and delete prepared SQL statements that have optional placeholder parameters. A prepared statement can use different values for these parameters each time it is run. * feature: CodePipeline: Updated the parameters to make actionType required for UpdateActionType diff --git a/README.md b/README.md index c22ce012d3..6eb1b25b54 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ For release notes, see the [CHANGELOG](https://github.com/aws/aws-sdk-js/blob/ma To use the SDK in the browser, simply add the following script tag to your HTML pages: - + You can also build a custom browser SDK with your specified set of AWS services. This can allow you to reduce the SDK's size, specify different API versions of diff --git a/apis/autoscaling-2011-01-01.normal.json b/apis/autoscaling-2011-01-01.normal.json index 8ffbbc5500..ee56c2285f 100644 --- a/apis/autoscaling-2011-01-01.normal.json +++ b/apis/autoscaling-2011-01-01.normal.json @@ -1855,7 +1855,7 @@ }, "MixedInstancesPolicy": { "shape": "MixedInstancesPolicy", - "documentation": "
An embedded object that specifies a mixed instances policy. The required parameters must be specified. If optional parameters are unspecified, their default values are used.
The policy includes parameters that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacities, but also the parameters that specify the instance configuration information—the launch template and instance types. The policy can also include a weight for each instance type and different launch templates for individual instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.
" + "documentation": "An embedded object that specifies a mixed instances policy. The required properties must be specified. If optional properties are unspecified, their default values are used.
The policy includes properties that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacities, but also the properties that specify the instance configuration information—the launch template and instance types. The policy can also include a weight for each instance type and different launch templates for individual instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.
" }, "InstanceId": { "shape": "XmlStringMaxLen19", @@ -2998,7 +2998,7 @@ "members": { "OnDemandAllocationStrategy": { "shape": "XmlString", - "documentation": "Indicates how to allocate instance types to fulfill On-Demand capacity. The only valid value is prioritized
, which is also the default value. This strategy uses the order of instance types in the overrides to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling group launches the remaining capacity using the second priority instance type, and so on.
Indicates how to allocate instance types to fulfill On-Demand capacity. The only valid value is prioritized
, which is also the default value. This strategy uses the order of instance types in the LaunchTemplateOverrides
to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling group launches the remaining capacity using the second priority instance type, and so on.
Indicates how to allocate instances across Spot Instance pools. If the allocation strategy is capacity-optimized
(recommended), the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity. If the allocation strategy is lowest-price
, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. Defaults to lowest-price
if not specified.
Indicates how to allocate instances across Spot Instance pools.
If the allocation strategy is lowest-price
, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. Defaults to lowest-price
if not specified.
If the allocation strategy is capacity-optimized
(recommended), the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity. Alternatively, you can use capacity-optimized-prioritized
and set the order of instance types in the list of launch template overrides from highest to lowest priority (from first to last in the list). Amazon EC2 Auto Scaling honors the instance type priorities on a best-effort basis but optimizes for capacity first.
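To make the relationship between these allocation-strategy properties concrete, here is a minimal sketch of a createAutoScalingGroup call with the AWS SDK for JavaScript that this changelog covers. The group name, launch template, subnets, and instance types are hypothetical placeholders; only the property names come from the model above.

```js
// Sketch: Auto Scaling group with a mixed instances policy.
// All names and IDs below are hypothetical placeholders.
const AWS = require('aws-sdk');
const autoscaling = new AWS.AutoScaling({ region: 'us-east-1' });

autoscaling.createAutoScalingGroup({
  AutoScalingGroupName: 'example-asg',
  MinSize: 1,
  MaxSize: 10,
  VPCZoneIdentifier: 'subnet-11111111,subnet-22222222',
  MixedInstancesPolicy: {
    LaunchTemplate: {
      LaunchTemplateSpecification: {
        LaunchTemplateName: 'example-template',
        Version: '$Latest'
      },
      // For "prioritized" and "capacity-optimized-prioritized", order matters:
      // the first override has the highest launch priority.
      Overrides: [
        { InstanceType: 'c5.large' },
        { InstanceType: 'c5a.large' },
        { InstanceType: 'm5.large' }
      ]
    },
    InstancesDistribution: {
      OnDemandAllocationStrategy: 'prioritized',
      SpotAllocationStrategy: 'capacity-optimized-prioritized'
    }
  }
}, (err) => {
  if (err) console.error(err);
  else console.log('Auto Scaling group created');
});
```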
Any parameters that you specify override the same parameters in the launch template. If not provided, Amazon EC2 Auto Scaling uses the instance type specified in the launch template when it launches an instance.
" + "documentation": "Any properties that you specify override the same properties in the launch template. If not provided, Amazon EC2 Auto Scaling uses the instance type specified in the launch template when it launches an instance.
" } }, - "documentation": "Describes a launch template and overrides.
You specify these parameters as part of a mixed instances policy.
When you update the launch template or overrides, existing Amazon EC2 instances continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches instances to match the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.
" + "documentation": "Describes a launch template and overrides.
You specify these properties as part of a mixed instances policy.
When you update the launch template or overrides, existing Amazon EC2 instances continue to run. When scale out occurs, Amazon EC2 Auto Scaling launches instances to match the new settings. When scale in occurs, Amazon EC2 Auto Scaling terminates instances according to the group's termination policies.
" }, "LaunchTemplateName": { "type": "string", @@ -3543,10 +3543,10 @@ }, "InstancesDistribution": { "shape": "InstancesDistribution", - "documentation": "Specifies the instances distribution. If not provided, the value for each parameter in InstancesDistribution
uses a default value.
Specifies the instances distribution. If not provided, the value for each property in InstancesDistribution
uses a default value.
Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.
You can create a mixed instances policy for a new Auto Scaling group, or you can create it for an existing group by updating the group to specify MixedInstancesPolicy
as the top-level parameter instead of a launch configuration or launch template.
Describes a mixed instances policy for an Auto Scaling group. With mixed instances, your Auto Scaling group can provision a combination of On-Demand Instances and Spot Instances across multiple instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.
You can create a mixed instances policy for a new Auto Scaling group, or you can create it for an existing group by updating the group to specify MixedInstancesPolicy
as the top-level property instead of a launch configuration or launch template.
An embedded object that specifies a mixed instances policy. When you make changes to an existing policy, all optional parameters are left unchanged if not specified. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.
" + "documentation": "An embedded object that specifies a mixed instances policy. When you make changes to an existing policy, all optional properties are left unchanged if not specified. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide.
" }, "MinSize": { "shape": "AutoScalingGroupMinSize", diff --git a/apis/elasticmapreduce-2009-03-31.min.json b/apis/elasticmapreduce-2009-03-31.min.json index 16848daaee..db8da7c77b 100644 --- a/apis/elasticmapreduce-2009-03-31.min.json +++ b/apis/elasticmapreduce-2009-03-31.min.json @@ -45,7 +45,7 @@ ], "members": { "InstanceGroups": { - "shape": "Su" + "shape": "Sx" }, "JobFlowId": {} } @@ -72,7 +72,7 @@ "members": { "JobFlowId": {}, "Steps": { - "shape": "S1f" + "shape": "S1i" } } }, @@ -80,7 +80,7 @@ "type": "structure", "members": { "StepIds": { - "shape": "S1o" + "shape": "S1r" } } } @@ -95,7 +95,7 @@ "members": { "ResourceId": {}, "Tags": { - "shape": "S1r" + "shape": "S1u" } } }, @@ -114,7 +114,7 @@ "members": { "ClusterId": {}, "StepIds": { - "shape": "S1o" + "shape": "S1r" }, "StepCancellationOption": {} } @@ -182,7 +182,7 @@ "AuthMode": {}, "VpcId": {}, "SubnetIds": { - "shape": "S26" + "shape": "S29" }, "ServiceRole": {}, "UserRole": {}, @@ -190,7 +190,7 @@ "EngineSecurityGroupId": {}, "DefaultS3Location": {}, "Tags": { - "shape": "S1r" + "shape": "S1u" } } }, @@ -279,7 +279,7 @@ "Id": {}, "Name": {}, "Status": { - "shape": "S2i" + "shape": "S2l" }, "Ec2InstanceAttributes": { "type": "structure", @@ -287,21 +287,21 @@ "Ec2KeyName": {}, "Ec2SubnetId": {}, "RequestedEc2SubnetIds": { - "shape": "S2o" + "shape": "S2r" }, "Ec2AvailabilityZone": {}, "RequestedEc2AvailabilityZones": { - "shape": "S2o" + "shape": "S2r" }, "IamInstanceProfile": {}, "EmrManagedMasterSecurityGroup": {}, "EmrManagedSlaveSecurityGroup": {}, "ServiceAccessSecurityGroup": {}, "AdditionalMasterSecurityGroups": { - "shape": "S2p" + "shape": "S2s" }, "AdditionalSlaveSecurityGroups": { - "shape": "S2p" + "shape": "S2s" } } }, @@ -321,10 +321,10 @@ "type": "boolean" }, "Applications": { - "shape": "S2s" + "shape": "S2v" }, "Tags": { - "shape": "S1r" + "shape": "S1u" }, "ServiceRole": {}, "NormalizedInstanceHours": { @@ -343,7 +343,7 @@ }, "RepoUpgradeOnBoot": {}, "KerberosAttributes": { - "shape": "S2w" + "shape": "S2z" }, "ClusterArn": {}, "OutpostArn": {}, @@ -351,7 +351,7 @@ "type": "integer" }, "PlacementGroups": { - "shape": "S2y" + "shape": "S31" } } } @@ -369,7 +369,7 @@ "type": "timestamp" }, "JobFlowIds": { - "shape": "S1m" + "shape": "S1p" }, "JobFlowStates": { "type": "list", @@ -483,7 +483,7 @@ "Ec2KeyName": {}, "Ec2SubnetId": {}, "Placement": { - "shape": "S3c" + "shape": "S3f" }, "KeepJobFlowAliveWhenNoSteps": { "type": "boolean" @@ -504,7 +504,7 @@ ], "members": { "StepConfig": { - "shape": "S1g" + "shape": "S1j" }, "ExecutionStatusDetail": { "type": "structure", @@ -535,13 +535,13 @@ "type": "structure", "members": { "BootstrapActionConfig": { - "shape": "S3j" + "shape": "S3m" } } } }, "SupportedProducts": { - "shape": "S3l" + "shape": "S3o" }, "VisibleToAllUsers": { "type": "boolean" @@ -576,7 +576,7 @@ "NotebookExecutionId": {}, "EditorId": {}, "ExecutionEngine": { - "shape": "S3p" + "shape": "S3s" }, "NotebookExecutionName": {}, "NotebookParams": {}, @@ -592,7 +592,7 @@ "LastStateChangeReason": {}, "NotebookInstanceSecurityGroupId": {}, "Tags": { - "shape": "S1r" + "shape": "S1u" } } } @@ -641,11 +641,11 @@ "Id": {}, "Name": {}, "Config": { - "shape": "S3x" + "shape": "S40" }, "ActionOnFailure": {}, "Status": { - "shape": "S3y" + "shape": "S41" } } } @@ -675,7 +675,7 @@ "AuthMode": {}, "VpcId": {}, "SubnetIds": { - "shape": "S26" + "shape": "S29" }, "ServiceRole": {}, "UserRole": {}, @@ -687,7 +687,7 @@ }, "DefaultS3Location": {}, "Tags": { - "shape": "S1r" + "shape": "S1u" } } } @@ -707,7 
+707,7 @@ ], "members": { "BlockPublicAccessConfiguration": { - "shape": "S49" + "shape": "S4c" }, "BlockPublicAccessConfigurationMetadata": { "type": "structure", @@ -739,7 +739,7 @@ "type": "structure", "members": { "ManagedScalingPolicy": { - "shape": "S4g" + "shape": "S4j" } } } @@ -802,7 +802,7 @@ "Name": {}, "ScriptPath": {}, "Args": { - "shape": "S2p" + "shape": "S2s" } } } @@ -839,7 +839,7 @@ "Id": {}, "Name": {}, "Status": { - "shape": "S2i" + "shape": "S2l" }, "NormalizedInstanceHours": { "type": "integer" @@ -931,7 +931,7 @@ "shape": "Sh" }, "EbsBlockDevices": { - "shape": "S57" + "shape": "S5a" }, "EbsOptimized": { "type": "boolean" @@ -1020,16 +1020,16 @@ "type": "long" }, "EbsBlockDevices": { - "shape": "S57" + "shape": "S5a" }, "EbsOptimized": { "type": "boolean" }, "ShrinkPolicy": { - "shape": "S5k" + "shape": "S5n" }, "AutoScalingPolicy": { - "shape": "S5o" + "shape": "S5r" } } } @@ -1201,7 +1201,7 @@ "member": {} }, "StepIds": { - "shape": "S1m" + "shape": "S1p" }, "Marker": {} } @@ -1217,11 +1217,11 @@ "Id": {}, "Name": {}, "Config": { - "shape": "S3x" + "shape": "S40" }, "ActionOnFailure": {}, "Status": { - "shape": "S3y" + "shape": "S41" } } } @@ -1363,7 +1363,7 @@ "member": {} }, "ShrinkPolicy": { - "shape": "S5k" + "shape": "S5n" }, "Configurations": { "shape": "Sh" @@ -1386,7 +1386,7 @@ "ClusterId": {}, "InstanceGroupId": {}, "AutoScalingPolicy": { - "shape": "Sy" + "shape": "S11" } } }, @@ -1396,7 +1396,7 @@ "ClusterId": {}, "InstanceGroupId": {}, "AutoScalingPolicy": { - "shape": "S5o" + "shape": "S5r" }, "ClusterArn": {} } @@ -1410,7 +1410,7 @@ ], "members": { "BlockPublicAccessConfiguration": { - "shape": "S49" + "shape": "S4c" } } }, @@ -1429,7 +1429,7 @@ "members": { "ClusterId": {}, "ManagedScalingPolicy": { - "shape": "S4g" + "shape": "S4j" } } }, @@ -1480,7 +1480,7 @@ "members": { "ResourceId": {}, "TagKeys": { - "shape": "S2p" + "shape": "S2s" } } }, @@ -1512,7 +1512,7 @@ "type": "integer" }, "InstanceGroups": { - "shape": "Su" + "shape": "Sx" }, "InstanceFleets": { "type": "list", @@ -1522,7 +1522,7 @@ }, "Ec2KeyName": {}, "Placement": { - "shape": "S3c" + "shape": "S3f" }, "KeepJobFlowAliveWhenNoSteps": { "type": "boolean" @@ -1533,30 +1533,30 @@ "HadoopVersion": {}, "Ec2SubnetId": {}, "Ec2SubnetIds": { - "shape": "S2o" + "shape": "S2r" }, "EmrManagedMasterSecurityGroup": {}, "EmrManagedSlaveSecurityGroup": {}, "ServiceAccessSecurityGroup": {}, "AdditionalMasterSecurityGroups": { - "shape": "S7e" + "shape": "S7h" }, "AdditionalSlaveSecurityGroups": { - "shape": "S7e" + "shape": "S7h" } } }, "Steps": { - "shape": "S1f" + "shape": "S1i" }, "BootstrapActions": { "type": "list", "member": { - "shape": "S3j" + "shape": "S3m" } }, "SupportedProducts": { - "shape": "S3l" + "shape": "S3o" }, "NewSupportedProducts": { "type": "list", @@ -1565,13 +1565,13 @@ "members": { "Name": {}, "Args": { - "shape": "S1m" + "shape": "S1p" } } } }, "Applications": { - "shape": "S2s" + "shape": "S2v" }, "Configurations": { "shape": "Sh" @@ -1582,7 +1582,7 @@ "JobFlowRole": {}, "ServiceRole": {}, "Tags": { - "shape": "S1r" + "shape": "S1u" }, "SecurityConfiguration": {}, "AutoScalingRole": {}, @@ -1593,16 +1593,16 @@ }, "RepoUpgradeOnBoot": {}, "KerberosAttributes": { - "shape": "S2w" + "shape": "S2z" }, "StepConcurrencyLevel": { "type": "integer" }, "ManagedScalingPolicy": { - "shape": "S4g" + "shape": "S4j" }, "PlacementGroupConfigs": { - "shape": "S2y" + "shape": "S31" } } }, @@ -1623,7 +1623,7 @@ ], "members": { "JobFlowIds": { - "shape": "S1m" + "shape": "S1p" }, 
"TerminationProtected": { "type": "boolean" @@ -1640,7 +1640,7 @@ ], "members": { "JobFlowIds": { - "shape": "S1m" + "shape": "S1p" }, "VisibleToAllUsers": { "type": "boolean" @@ -1663,12 +1663,12 @@ "NotebookExecutionName": {}, "NotebookParams": {}, "ExecutionEngine": { - "shape": "S3p" + "shape": "S3s" }, "ServiceRole": {}, "NotebookInstanceSecurityGroupId": {}, "Tags": { - "shape": "S1r" + "shape": "S1u" } } }, @@ -1698,7 +1698,7 @@ ], "members": { "JobFlowIds": { - "shape": "S1m" + "shape": "S1p" } } } @@ -1714,7 +1714,7 @@ "Name": {}, "Description": {}, "SubnetIds": { - "shape": "S26" + "shape": "S29" }, "DefaultS3Location": {} } @@ -1870,12 +1870,19 @@ "AllocationStrategy" ], "members": { - "AllocationStrategy": {} + "AllocationStrategy": {}, + "CapacityReservationOptions": { + "type": "structure", + "members": { + "UsageStrategy": {}, + "CapacityReservationPreference": {} + } + } } } } }, - "Su": { + "Sx": { "type": "list", "member": { "type": "structure", @@ -1900,12 +1907,12 @@ "shape": "Sa" }, "AutoScalingPolicy": { - "shape": "Sy" + "shape": "S11" } } } }, - "Sy": { + "S11": { "type": "structure", "required": [ "Constraints", @@ -1913,14 +1920,14 @@ ], "members": { "Constraints": { - "shape": "Sz" + "shape": "S12" }, "Rules": { - "shape": "S10" + "shape": "S13" } } }, - "Sz": { + "S12": { "type": "structure", "required": [ "MinCapacity", @@ -1935,7 +1942,7 @@ } } }, - "S10": { + "S13": { "type": "list", "member": { "type": "structure", @@ -2017,13 +2024,13 @@ } } }, - "S1f": { + "S1i": { "type": "list", "member": { - "shape": "S1g" + "shape": "S1j" } }, - "S1g": { + "S1j": { "type": "structure", "required": [ "Name", @@ -2051,21 +2058,21 @@ "Jar": {}, "MainClass": {}, "Args": { - "shape": "S1m" + "shape": "S1p" } } } } }, - "S1m": { + "S1p": { "type": "list", "member": {} }, - "S1o": { + "S1r": { "type": "list", "member": {} }, - "S1r": { + "S1u": { "type": "list", "member": { "type": "structure", @@ -2075,11 +2082,11 @@ } } }, - "S26": { + "S29": { "type": "list", "member": {} }, - "S2i": { + "S2l": { "type": "structure", "members": { "State": {}, @@ -2106,15 +2113,15 @@ } } }, - "S2o": { + "S2r": { "type": "list", "member": {} }, - "S2p": { + "S2s": { "type": "list", "member": {} }, - "S2s": { + "S2v": { "type": "list", "member": { "type": "structure", @@ -2122,7 +2129,7 @@ "Name": {}, "Version": {}, "Args": { - "shape": "S2p" + "shape": "S2s" }, "AdditionalInfo": { "shape": "Sj" @@ -2130,7 +2137,7 @@ } } }, - "S2w": { + "S2z": { "type": "structure", "required": [ "Realm", @@ -2144,7 +2151,7 @@ "ADDomainJoinPassword": {} } }, - "S2y": { + "S31": { "type": "list", "member": { "type": "structure", @@ -2157,16 +2164,16 @@ } } }, - "S3c": { + "S3f": { "type": "structure", "members": { "AvailabilityZone": {}, "AvailabilityZones": { - "shape": "S2o" + "shape": "S2r" } } }, - "S3j": { + "S3m": { "type": "structure", "required": [ "Name", @@ -2182,17 +2189,17 @@ "members": { "Path": {}, "Args": { - "shape": "S1m" + "shape": "S1p" } } } } }, - "S3l": { + "S3o": { "type": "list", "member": {} }, - "S3p": { + "S3s": { "type": "structure", "required": [ "Id" @@ -2203,7 +2210,7 @@ "MasterInstanceSecurityGroupId": {} } }, - "S3x": { + "S40": { "type": "structure", "members": { "Jar": {}, @@ -2212,11 +2219,11 @@ }, "MainClass": {}, "Args": { - "shape": "S2p" + "shape": "S2s" } } }, - "S3y": { + "S41": { "type": "structure", "members": { "State": {}, @@ -2251,7 +2258,7 @@ } } }, - "S49": { + "S4c": { "type": "structure", "required": [ "BlockPublicSecurityGroupRules" @@ -2279,7 +2286,7 @@ } } 
}, - "S4g": { + "S4j": { "type": "structure", "members": { "ComputeLimits": { @@ -2307,7 +2314,7 @@ } } }, - "S57": { + "S5a": { "type": "list", "member": { "type": "structure", @@ -2319,7 +2326,7 @@ } } }, - "S5k": { + "S5n": { "type": "structure", "members": { "DecommissionTimeout": { @@ -2329,10 +2336,10 @@ "type": "structure", "members": { "InstancesToTerminate": { - "shape": "S5m" + "shape": "S5p" }, "InstancesToProtect": { - "shape": "S5m" + "shape": "S5p" }, "InstanceTerminationTimeout": { "type": "integer" @@ -2341,11 +2348,11 @@ } } }, - "S5m": { + "S5p": { "type": "list", "member": {} }, - "S5o": { + "S5r": { "type": "structure", "members": { "Status": { @@ -2362,14 +2369,14 @@ } }, "Constraints": { - "shape": "Sz" + "shape": "S12" }, "Rules": { - "shape": "S10" + "shape": "S13" } } }, - "S7e": { + "S7h": { "type": "list", "member": {} } diff --git a/apis/elasticmapreduce-2009-03-31.normal.json b/apis/elasticmapreduce-2009-03-31.normal.json index 2cbc2471e2..fcacad5479 100644 --- a/apis/elasticmapreduce-2009-03-31.normal.json +++ b/apis/elasticmapreduce-2009-03-31.normal.json @@ -2699,7 +2699,7 @@ }, "BidPrice": { "shape": "String", - "documentation": "The bid price for each EC2 Spot Instance type as defined by InstanceType
. Expressed in USD. If neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is provided, BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice
to set the amount equal to the On-Demand price, or specify an amount in USD.
The bid price for each EC2 Spot Instance as defined by InstanceType
. Expressed in USD. If neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is provided, BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice
to set the amount equal to the On-Demand price, or specify an amount in USD.
The bid price for each EC2 Spot Instance as defined by InstanceType
. Expressed in USD. If neither BidPrice
nor BidPriceAsPercentageOfOnDemandPrice
is provided, BidPriceAsPercentageOfOnDemandPrice
defaults to 100%.
If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice
to set the amount equal to the On-Demand price, or specify an amount in USD.
The number of steps that can be executed concurrently. You can specify a maximum of 256 steps.
" + "documentation": "The number of steps that can be executed concurrently. You can specify a minimum of 1 step and a maximum of 256 steps.
" } } }, @@ -4086,6 +4086,33 @@ "shape": "NotebookExecutionSummary" } }, + "OnDemandCapacityReservationOptions": { + "type": "structure", + "members": { + "UsageStrategy": { + "shape": "OnDemandCapacityReservationUsageStrategy", + "documentation": "Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.
If you specify use-capacity-reservations-first
, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (lowest-price
) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (lowest-price
).
If you do not specify a value, the fleet fulfils the On-Demand capacity according to the chosen On-Demand allocation strategy.
" + }, + "CapacityReservationPreference": { + "shape": "OnDemandCapacityReservationPreference", + "documentation": "Indicates the instance's Capacity Reservation preferences. Possible preferences include:
open
- The instance can run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone).
none
- The instance avoids running in a Capacity Reservation even if one is available. The instance runs as an On-Demand Instance.
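As a rough sketch of how the new Capacity Reservation options above might be supplied on an instance fleet through this SDK, consider the runJobFlow call below. The cluster name, release label, roles, and instance types are hypothetical placeholders; only the CapacityReservationOptions shape comes from the model added in this diff.

```js
// Sketch: an instance fleet whose On-Demand capacity is fulfilled from
// unused Capacity Reservations first. Names and types are placeholders.
const AWS = require('aws-sdk');
const emr = new AWS.EMR({ region: 'us-east-1' });

emr.runJobFlow({
  Name: 'example-cluster',            // hypothetical
  ReleaseLabel: 'emr-5.32.0',         // hypothetical
  ServiceRole: 'EMR_DefaultRole',
  JobFlowRole: 'EMR_EC2_DefaultRole',
  Instances: {
    InstanceFleets: [{
      Name: 'core-fleet',
      InstanceFleetType: 'CORE',
      TargetOnDemandCapacity: 2,
      InstanceTypeConfigs: [{ InstanceType: 'm5.xlarge' }],
      LaunchSpecifications: {
        OnDemandSpecification: {
          AllocationStrategy: 'lowest-price',
          CapacityReservationOptions: {
            UsageStrategy: 'use-capacity-reservations-first',
            CapacityReservationPreference: 'open'
          }
        }
      }
    }]
  }
}, (err, data) => {
  if (err) console.error(err);
  else console.log('Cluster started:', data.JobFlowId);
});
```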
Describes the strategy for using unused Capacity Reservations for fulfilling On-Demand capacity.
" + }, + "OnDemandCapacityReservationPreference": { + "type": "string", + "enum": [ + "open", + "none" + ] + }, + "OnDemandCapacityReservationUsageStrategy": { + "type": "string", + "enum": [ + "use-capacity-reservations-first" + ] + }, "OnDemandProvisioningAllocationStrategy": { "type": "string", "enum": [ @@ -4100,7 +4127,11 @@ "members": { "AllocationStrategy": { "shape": "OnDemandProvisioningAllocationStrategy", - "documentation": "Specifies the strategy to use in launching On-Demand Instance fleets. Currently, the only option is lowest-price (the default), which launches the lowest price first.
" + "documentation": "Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is lowest-price
(the default), which launches the lowest price first.
The launch specification for On-Demand instances in the instance fleet, which determines the allocation strategy.
" } }, "documentation": "The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy.
The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR version 5.12.1 and later.
Retrieves an MPEG Dynamic Adaptive Streaming over HTTP (DASH) URL for the stream. You can then open the URL in a media player to view the stream contents.
Both the StreamName
and the StreamARN
parameters are optional, but you must specify either the StreamName
or the StreamARN
when invoking this API operation.
An Amazon Kinesis video stream has the following requirements for providing data through MPEG-DASH:
The media must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC
(for h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 should be A_AAC
(for AAC) or A_MS/ACM (for G.711).
Data retention must be greater than 0.
The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 format. For more information, see MPEG-4 specification ISO/IEC 14496-15. For information about adapting stream data to a given format, see NAL Adaptation Flags.
The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format.
The following procedure shows how to use MPEG-DASH with Kinesis Video Streams:
Get an endpoint using GetDataEndpoint, specifying GET_DASH_STREAMING_SESSION_URL
for the APIName
parameter.
Retrieve the MPEG-DASH URL using GetDASHStreamingSessionURL
. Kinesis Video Streams creates an MPEG-DASH streaming session to be used for accessing content in a stream using the MPEG-DASH protocol. GetDASHStreamingSessionURL
returns an authenticated URL (that includes an encrypted session token) for the session's MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH).
Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.
The media that is made available through the manifest consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.
Provide the URL (containing the encrypted session token) for the MPEG-DASH manifest to a media player that supports the MPEG-DASH protocol. Kinesis Video Streams makes the initialization fragment and media fragments available through the manifest URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain encoded video frames or encoded audio samples.
The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:
GetDASHManifest: Retrieves an MPEG DASH manifest, which contains the metadata for the media that you want to play back.
GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp
\" and \"moov
\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.
The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.
GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof
\" and \"mdat
\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.
After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.
Data retrieved with this action is billable. See Pricing for details.
The following restrictions apply to MPEG-DASH sessions:
A streaming session URL should not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.
A Kinesis video stream can have a maximum of ten active MPEG-DASH streaming sessions. If a new session is created when the maximum number of sessions is already active, the oldest (earliest created) session is closed. The number of active GetMedia
connections on a Kinesis video stream does not count against this limit, and the number of active MPEG-DASH sessions does not count against the active GetMedia
connection limit.
The maximum limits for active HLS and MPEG-DASH streaming sessions are independent of each other.
You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes
Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both MPEG-DASH sessions and outgoing AWS data apply.
For more information about HLS, see HTTP Live Streaming on the Apple Developer site.
If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:
x-amz-ErrorType
HTTP header – contains a more specific error type in addition to what the HTTP status code provides.
x-amz-RequestId
HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.
Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.
For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
Retrieves an MPEG Dynamic Adaptive Streaming over HTTP (DASH) URL for the stream. You can then open the URL in a media player to view the stream contents.
Both the StreamName
and the StreamARN
parameters are optional, but you must specify either the StreamName
or the StreamARN
when invoking this API operation.
An Amazon Kinesis video stream has the following requirements for providing data through MPEG-DASH:
The media must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC
(for h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 should be A_AAC
(for AAC) or A_MS/ACM (for G.711).
Data retention must be greater than 0.
The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 format. For more information, see MPEG-4 specification ISO/IEC 14496-15. For information about adapting stream data to a given format, see NAL Adaptation Flags.
The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7) or the MS Wave format.
The following procedure shows how to use MPEG-DASH with Kinesis Video Streams:
Get an endpoint using GetDataEndpoint, specifying GET_DASH_STREAMING_SESSION_URL
for the APIName
parameter.
Retrieve the MPEG-DASH URL using GetDASHStreamingSessionURL
. Kinesis Video Streams creates an MPEG-DASH streaming session to be used for accessing content in a stream using the MPEG-DASH protocol. GetDASHStreamingSessionURL
returns an authenticated URL (that includes an encrypted session token) for the session's MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH).
Don't share or store this token where an unauthorized entity can access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you use with your AWS credentials.
The media that is made available through the manifest consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.
Provide the URL (containing the encrypted session token) for the MPEG-DASH manifest to a media player that supports the MPEG-DASH protocol. Kinesis Video Streams makes the initialization fragment and media fragments available through the manifest URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain encoded video frames or encoded audio samples.
The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:
GetDASHManifest: Retrieves an MPEG DASH manifest, which contains the metadata for the media that you want to play back.
GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp
\" and \"moov
\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.
The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.
GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof
\" and \"mdat
\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.
After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.
Data retrieved with this action is billable. See Pricing for details.
For restrictions that apply to MPEG-DASH sessions, see Kinesis Video Streams Limits.
You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes
Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both MPEG-DASH sessions and outgoing AWS data apply.
For more information about HLS, see HTTP Live Streaming on the Apple Developer site.
If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:
x-amz-ErrorType
HTTP header – contains a more specific error type in addition to what the HTTP status code provides.
x-amz-RequestId
HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.
Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.
For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.
Both the StreamName
and the StreamARN
parameters are optional, but you must specify either the StreamName
or the StreamARN
when invoking this API operation.
An Amazon Kinesis video stream has the following requirements for providing data through HLS:
The media must contain h.264 or h.265 encoded video and, optionally, AAC encoded audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC
(for h.264) or V_MPEG/ISO/HEVC
(for h.265). Optionally, the codec ID of track 2 should be A_AAC
.
Data retention must be greater than 0.
The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format or HEVC for H.265 format (MPEG-4 specification ISO/IEC 14496-15). For information about adapting stream data to a given format, see NAL Adaptation Flags.
The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7).
Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF) or the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.
The following procedure shows how to use HLS with Kinesis Video Streams:
Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL
for the APIName
parameter.
Retrieve the HLS URL using GetHLSStreamingSessionURL
. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL
returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).
Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.
The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.
Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples.
The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:
GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist
action for each track, and additional metadata for the media player, including estimated bitrate and resolution.
GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment
action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment
actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode
is LIVE
or ON_DEMAND
. The HLS media playlist is typically static for sessions with a PlaybackType
of ON_DEMAND
. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType
of LIVE
. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track.
GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp
\" and \"moov
\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.
The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.
GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof
\" and \"mdat
\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.
After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.
Data retrieved with this action is billable. See Pricing for details.
GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream.
If the ContainerFormat
is MPEG_TS
, this API is used instead of GetMP4InitFragment
and GetMP4MediaFragment
to retrieve stream media.
Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing.
The following restrictions apply to HLS sessions:
A streaming session URL should not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.
A Kinesis video stream can have a maximum of ten active HLS streaming sessions. If a new session is created when the maximum number of sessions is already active, the oldest (earliest created) session is closed. The number of active GetMedia
connections on a Kinesis video stream does not count against this limit, and the number of active HLS sessions does not count against the active GetMedia
connection limit.
The maximum limits for active HLS and MPEG-DASH streaming sessions are independent of each other.
You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes
Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.
For more information about HLS, see HTTP Live Streaming on the Apple Developer site.
If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:
x-amz-ErrorType
HTTP header – contains a more specific error type in addition to what the HTTP status code provides.
x-amz-RequestId
HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.
Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.
For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
Retrieves an HTTP Live Streaming (HLS) URL for the stream. You can then open the URL in a browser or media player to view the stream contents.
Both the StreamName
and the StreamARN
parameters are optional, but you must specify either the StreamName
or the StreamARN
when invoking this API operation.
An Amazon Kinesis video stream has the following requirements for providing data through HLS:
The media must contain h.264 or h.265 encoded video and, optionally, AAC encoded audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC
(for h.264) or V_MPEG/ISO/HEVC
(for h.265). Optionally, the codec ID of track 2 should be A_AAC
.
Data retention must be greater than 0.
The video track of each fragment must contain codec private data in the Advanced Video Coding (AVC) for H.264 format or HEVC for H.265 format (MPEG-4 specification ISO/IEC 14496-15). For information about adapting stream data to a given format, see NAL Adaptation Flags.
The audio track (if present) of each fragment must contain codec private data in the AAC format (AAC specification ISO/IEC 13818-7).
Kinesis Video Streams HLS sessions contain fragments in the fragmented MPEG-4 form (also called fMP4 or CMAF) or the MPEG-2 form (also called TS chunks, which the HLS specification also supports). For more information about HLS fragment types, see the HLS specification.
The following procedure shows how to use HLS with Kinesis Video Streams:
Get an endpoint using GetDataEndpoint, specifying GET_HLS_STREAMING_SESSION_URL
for the APIName
parameter.
Retrieve the HLS URL using GetHLSStreamingSessionURL
. Kinesis Video Streams creates an HLS streaming session to be used for accessing content in a stream using the HLS protocol. GetHLSStreamingSessionURL
returns an authenticated URL (that includes an encrypted session token) for the session's HLS master playlist (the root resource needed for streaming with HLS).
Don't share or store this token where an unauthorized entity could access it. The token provides access to the content of the stream. Safeguard the token with the same measures that you would use with your AWS credentials.
The media that is made available through the playlist consists only of the requested stream, time range, and format. No other media data (such as frames outside the requested window or alternate bitrates) is made available.
Provide the URL (containing the encrypted session token) for the HLS master playlist to a media player that supports the HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization fragment, and media fragments available through the master playlist URL. The initialization fragment contains the codec private data for the stream, and other data needed to set up the video or audio decoder and renderer. The media fragments contain H.264-encoded video frames or AAC-encoded audio samples.
The media player receives the authenticated URL and requests stream metadata and media data normally. When the media player requests data, it calls the following actions:
GetHLSMasterPlaylist: Retrieves an HLS master playlist, which contains a URL for the GetHLSMediaPlaylist
action for each track, and additional metadata for the media player, including estimated bitrate and resolution.
GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to access the MP4 initialization fragment with the GetMP4InitFragment
action, and URLs to access the MP4 media fragments with the GetMP4MediaFragment
actions. The HLS media playlist also contains metadata about the stream that the player needs to play it, such as whether the PlaybackMode
is LIVE
or ON_DEMAND
. The HLS media playlist is typically static for sessions with a PlaybackType
of ON_DEMAND
. The HLS media playlist is continually updated with new fragments for sessions with a PlaybackType
of LIVE
. There is a distinct HLS media playlist for the video track and the audio track (if applicable) that contains MP4 media URLs for the specific track.
GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player typically loads the initialization fragment before loading any media fragments. This fragment contains the \"ftyp
\" and \"moov
\" MP4 atoms, and the child atoms that are needed to initialize the media player decoder.
The initialization fragment does not correspond to a fragment in a Kinesis video stream. It contains only the codec private data for the stream and respective track, which the media player needs to decode the media frames.
GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the \"moof
\" and \"mdat
\" MP4 atoms and their child atoms, containing the encoded fragment's media frames and their timestamps.
After the first media fragment is made available in a streaming session, any fragments that don't contain the same codec private data cause an error to be returned when those different media fragments are loaded. Therefore, the codec private data should not change between fragments in a session. This also means that the session fails if the fragments in a stream change from having only video to having both audio and video.
Data retrieved with this action is billable. See Pricing for details.
GetTSFragment: Retrieves MPEG TS fragments containing both initialization and media data for all tracks in the stream.
If the ContainerFormat
is MPEG_TS
, this API is used instead of GetMP4InitFragment
and GetMP4MediaFragment
to retrieve stream media.
Data retrieved with this action is billable. For more information, see Kinesis Video Streams pricing.
A streaming session URL must not be shared between players. The service might throttle a session if multiple media players are sharing it. For connection limits, see Kinesis Video Streams Limits.
You can monitor the amount of data that the media player consumes by monitoring the GetMP4MediaFragment.OutgoingBytes
Amazon CloudWatch metric. For information about using CloudWatch to monitor Kinesis Video Streams, see Monitoring Kinesis Video Streams. For pricing information, see Amazon Kinesis Video Streams Pricing and AWS Pricing. Charges for both HLS sessions and outgoing AWS data apply.
For more information about HLS, see HTTP Live Streaming on the Apple Developer site.
If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:
x-amz-ErrorType
HTTP header – contains a more specific error type in addition to what the HTTP status code provides.
x-amz-RequestId
HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.
Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.
For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.
You must first call the GetDataEndpoint
API to get an endpoint. Then send the GetMediaForFragmentList
requests to this endpoint using the --endpoint-url parameter.
The following limits apply when using the GetMediaForFragmentList
API:
A client can call GetMediaForFragmentList
up to five times per second per stream.
Kinesis Video Streams sends media data at a rate of up to 25 megabytes per second (or 200 megabits per second) during a GetMediaForFragmentList
session.
If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:
x-amz-ErrorType
HTTP header – contains a more specific error type in addition to what the HTTP status code provides.
x-amz-RequestId
HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.
Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.
For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
Gets media for a list of fragments (specified by fragment number) from the archived data in an Amazon Kinesis video stream.
You must first call the GetDataEndpoint
API to get an endpoint. Then send the GetMediaForFragmentList
requests to this endpoint using the --endpoint-url parameter.
For limits, see Kinesis Video Streams Limits.
If an error is thrown after invoking a Kinesis Video Streams archived media API, in addition to the HTTP status code and the response body, it includes the following pieces of information:
x-amz-ErrorType
HTTP header – contains a more specific error type in addition to what the HTTP status code provides.
x-amz-RequestId
HTTP header – if you want to report an issue to AWS, the support team can better diagnose the problem if given the Request Id.
Both the HTTP status code and the ErrorType header can be utilized to make programmatic decisions about whether errors are retry-able and under what conditions, as well as provide information on what actions the client programmer might need to take in order to successfully try again.
For more information, see the Errors section at the bottom of this topic, as well as Common Errors.
The starting timestamp in the range of timestamps for which to return fragments.
This value is inclusive. Fragments that start before the StartTimestamp
and continue past it are included in the session. If FragmentSelectorType
is SERVER_TIMESTAMP
, the StartTimestamp
must be later than the stream head.
The starting timestamp in the range of timestamps for which to return fragments.
Only fragments that start exactly at or after StartTimestamp
are included in the session. Fragments that start before StartTimestamp
and continue past it aren't included in the session. If FragmentSelectorType
is SERVER_TIMESTAMP
, the StartTimestamp
must be later than the stream head.
The end of the timestamp range for the requested media.
This value must be within 3 hours of the specified StartTimestamp
, and it must be later than the StartTimestamp
value. If FragmentSelectorType
for the request is SERVER_TIMESTAMP
, this value must be in the past.
This value is inclusive. The EndTimestamp
is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp
value and continue past it are included in the session.
The end of the timestamp range for the requested media.
This value must be within 24 hours of the specified StartTimestamp
, and it must be later than the StartTimestamp
value. If FragmentSelectorType
for the request is SERVER_TIMESTAMP
, this value must be in the past.
This value is inclusive. The EndTimestamp
is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp
value and continue past it are included in the session.
The range of timestamps for which to return fragments.
The values in the ClipTimestampRange are inclusive
. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.
The range of timestamps for which to return fragments.
" }, "ContainerFormat": { "type": "string", @@ -283,6 +283,11 @@ "SERVER_TIMESTAMP" ] }, + "DASHMaxResults": { + "type": "long", + "max": 5000, + "min": 1 + }, "DASHPlaybackMode": { "type": "string", "enum": [ @@ -299,14 +304,14 @@ "members": { "StartTimestamp": { "shape": "Timestamp", - "documentation": "The start of the timestamp range for the requested media.
If the DASHTimestampRange
value is specified, the StartTimestamp
value is required.
This value is inclusive. Fragments that start before the StartTimestamp
and continue past it are included in the session. If FragmentSelectorType
is SERVER_TIMESTAMP
, the StartTimestamp
must be later than the stream head.
The start of the timestamp range for the requested media.
If the DASHTimestampRange
value is specified, the StartTimestamp
value is required.
Only fragments that start exactly at or after StartTimestamp
are included in the session. Fragments that start before StartTimestamp
and continue past it aren't included in the session. If FragmentSelectorType
is SERVER_TIMESTAMP
, the StartTimestamp
must be later than the stream head.
The end of the timestamp range for the requested media. This value must be within 3 hours of the specified StartTimestamp
, and it must be later than the StartTimestamp
value.
If FragmentSelectorType
for the request is SERVER_TIMESTAMP
, this value must be in the past.
The EndTimestamp
value is required for ON_DEMAND
mode, but optional for LIVE_REPLAY
mode. If the EndTimestamp
is not set for LIVE_REPLAY
mode then the session will continue to include newly ingested fragments until the session expires.
This value is inclusive. The EndTimestamp
is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp
value and continue past it are included in the session.
The end of the timestamp range for the requested media. This value must be within 24 hours of the specified StartTimestamp
, and it must be later than the StartTimestamp
value.
If FragmentSelectorType
for the request is SERVER_TIMESTAMP
, this value must be in the past.
The EndTimestamp
value is required for ON_DEMAND
mode, but optional for LIVE_REPLAY
mode. If the EndTimestamp
is not set for LIVE_REPLAY
mode then the session will continue to include newly ingested fragments until the session expires.
This value is inclusive. The EndTimestamp
is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp
value and continue past it are included in the session.
The start and end of the timestamp range for the requested media.
This value should not be present if PlaybackType
is LIVE
.
The values in the DASHTimestampRange
are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.
The start and end of the timestamp range for the requested media.
This value should not be present if PlaybackType
is LIVE
.
The values in the DASHTimestampRange
are inclusive. Fragments that start exactly at or after the start time are included in the session. Fragments that start before the start time and continue past it are not included in the session.
Whether to retrieve live, live replay, or archived, on-demand data.
Features of the three types of sessions include the following:
LIVE
: For sessions of this type, the MPEG-DASH manifest is continually updated with the latest fragments as they become available. We recommend that the media player retrieve a new manifest on a one-second interval. When this type of session is played in a media player, the user interface typically displays a \"live\" notification, with no scrubber control for choosing the position in the playback window to display.
In LIVE
mode, the newest available fragments are included in an MPEG-DASH manifest, even if there is a gap between fragments (that is, if a fragment is missing). A gap like this might cause a media player to halt or cause a jump in playback. In this mode, fragments are not added to the MPEG-DASH manifest if they are older than the newest fragment in the playlist. If the missing fragment becomes available after a subsequent fragment is added to the manifest, the older fragment is not added, and the gap is not filled.
LIVE_REPLAY
: For sessions of this type, the MPEG-DASH manifest is updated similarly to how it is updated for LIVE
mode except that it starts by including fragments from a given start time. Instead of fragments being added as they are ingested, fragments are added as the duration of the next fragment elapses. For example, if the fragments in the session are two seconds long, then a new fragment is added to the manifest every two seconds. This mode is useful if you want to start playback from the time an event is detected and then continue with live media that has not yet been ingested as of the time of the session creation. It is also useful for streaming previously archived media without being limited by the 1,000 fragment limit in the ON_DEMAND
mode.
ON_DEMAND
: For sessions of this type, the MPEG-DASH manifest contains all the fragments for the session, up to the number that is specified in MaxMediaPlaylistFragmentResults
. The manifest must be retrieved only once for each session. When this type of session is played in a media player, the user interface typically displays a scrubber control for choosing the position in the playback window to display.
In all playback modes, if FragmentSelectorType
is PRODUCER_TIMESTAMP
, and if there are multiple fragments with the same start timestamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the MPEG-DASH manifest. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the MPEG-DASH manifest. This can lead to unexpected behavior in the media player.
The default is LIVE
.
Whether to retrieve live, live replay, or archived, on-demand data.
Features of the three types of sessions include the following:
LIVE
: For sessions of this type, the MPEG-DASH manifest is continually updated with the latest fragments as they become available. We recommend that the media player retrieve a new manifest on a one-second interval. When this type of session is played in a media player, the user interface typically displays a \"live\" notification, with no scrubber control for choosing the position in the playback window to display.
In LIVE
mode, the newest available fragments are included in an MPEG-DASH manifest, even if there is a gap between fragments (that is, if a fragment is missing). A gap like this might cause a media player to halt or cause a jump in playback. In this mode, fragments are not added to the MPEG-DASH manifest if they are older than the newest fragment in the playlist. If the missing fragment becomes available after a subsequent fragment is added to the manifest, the older fragment is not added, and the gap is not filled.
LIVE_REPLAY
: For sessions of this type, the MPEG-DASH manifest is updated similarly to how it is updated for LIVE
mode except that it starts by including fragments from a given start time. Instead of fragments being added as they are ingested, fragments are added as the duration of the next fragment elapses. For example, if the fragments in the session are two seconds long, then a new fragment is added to the manifest every two seconds. This mode is useful if you want to start playback from the time an event is detected and then continue with live media that has not yet been ingested as of the time of the session creation. It is also useful for streaming previously archived media without being limited by the 1,000 fragment limit in the ON_DEMAND
mode.
ON_DEMAND
: For sessions of this type, the MPEG-DASH manifest contains all the fragments for the session, up to the number that is specified in MaxManifestFragmentResults
. The manifest must be retrieved only once for each session. When this type of session is played in a media player, the user interface typically displays a scrubber control for choosing the position in the playback window to display.
In all playback modes, if FragmentSelectorType
is PRODUCER_TIMESTAMP
, and if there are multiple fragments with the same start timestamp, the fragment that has the largest fragment number (that is, the newest fragment) is included in the MPEG-DASH manifest. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the MPEG-DASH manifest. This can lead to unexpected behavior in the media player.
The default is LIVE
.
The time in seconds until the requested session expires. This value can be between 300 (5 minutes) and 43200 (12 hours).
When a session expires, no new calls to GetDashManifest
, GetMP4InitFragment
, or GetMP4MediaFragment
can be made for that session.
The default is 300 (5 minutes).
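For illustration, a minimal sketch of requesting an on-demand MPEG-DASH session for an archived window with this SDK; the endpoint, stream name, and timestamps are hypothetical placeholders (the endpoint would come from GetDataEndpoint with APIName GET_DASH_STREAMING_SESSION_URL):

```javascript
// Sketch: request an on-demand MPEG-DASH session. All values are placeholders.
const AWS = require('aws-sdk');

const archivedMedia = new AWS.KinesisVideoArchivedMedia({
  endpoint: 'https://b-example.kinesisvideo.us-west-2.amazonaws.com',
  region: 'us-west-2'
});

archivedMedia.getDASHStreamingSessionURL({
  StreamName: 'example-stream',
  PlaybackMode: 'ON_DEMAND',
  DASHFragmentSelector: {
    FragmentSelectorType: 'PRODUCER_TIMESTAMP',
    TimestampRange: {
      StartTimestamp: new Date('2021-03-01T00:00:00Z'),
      // Must be later than StartTimestamp and within 24 hours of it.
      EndTimestamp: new Date('2021-03-01T01:00:00Z')
    }
  },
  MaxManifestFragmentResults: 5000, // the new 5,000-fragment ceiling from this release
  Expires: 3600 // session lifetime in seconds (300-43200)
}, (err, data) => {
  if (err) return console.error(err);
  console.log('MPEG-DASH manifest URL:', data.DASHStreamingSessionURL);
});
```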
" }, "MaxManifestFragmentResults": { - "shape": "PageLimit", + "shape": "DASHMaxResults", "documentation": "The maximum number of fragments that are returned in the MPEG-DASH manifest.
When the PlaybackMode
is LIVE
, the most recent fragments are returned up to this value. When the PlaybackMode
is ON_DEMAND
, the oldest fragments are returned, up to this maximum number.
When more fragments are available in a live MPEG-DASH manifest, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live MPEG-DASH manifest have a minimum of 3 fragments and a maximum of 10 fragments.
The default is 5 fragments if PlaybackMode
is LIVE
or LIVE_REPLAY
, and 1,000 if PlaybackMode
is ON_DEMAND
.
The maximum value of 1,000 fragments corresponds to more than 16 minutes of video on streams with 1-second fragments, and more than 2 1/2 hours of video on streams with 10-second fragments.
" } } @@ -479,7 +484,7 @@ }, "PlaybackMode": { "shape": "HLSPlaybackMode", - "documentation": "Whether to retrieve live, live replay, or archived, on-demand data.
Features of the three types of sessions include the following:
LIVE
: For sessions of this type, the HLS media playlist is continually updated with the latest fragments as they become available. We recommend that the media player retrieve a new playlist on a one-second interval. When this type of session is played in a media player, the user interface typically displays a \"live\" notification, with no scrubber control for choosing the position in the playback window to display.
In LIVE
mode, the newest available fragments are included in an HLS media playlist, even if there is a gap between fragments (that is, if a fragment is missing). A gap like this might cause a media player to halt or cause a jump in playback. In this mode, fragments are not added to the HLS media playlist if they are older than the newest fragment in the playlist. If the missing fragment becomes available after a subsequent fragment is added to the playlist, the older fragment is not added, and the gap is not filled.
LIVE_REPLAY
: For sessions of this type, the HLS media playlist is updated similarly to how it is updated for LIVE
mode except that it starts by including fragments from a given start time. Instead of fragments being added as they are ingested, fragments are added as the duration of the next fragment elapses. For example, if the fragments in the session are two seconds long, then a new fragment is added to the media playlist every two seconds. This mode is useful if you want to start playback from the time an event is detected and then continue with live media that has not yet been ingested as of the time of the session creation. It is also useful for streaming previously archived media without being limited by the 1,000 fragment limit in the ON_DEMAND
mode.
ON_DEMAND
: For sessions of this type, the HLS media playlist contains all the fragments for the session, up to the number that is specified in MaxMediaPlaylistFragmentResults
. The playlist must be retrieved only once for each session. When this type of session is played in a media player, the user interface typically displays a scrubber control for choosing the position in the playback window to display.
In all playback modes, if FragmentSelectorType
is PRODUCER_TIMESTAMP
, and if there are multiple fragments with the same start timestamp, the fragment that has the larger fragment number (that is, the newer fragment) is included in the HLS media playlist. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the HLS media playlist. This can lead to unexpected behavior in the media player.
The default is LIVE
.
Whether to retrieve live, live replay, or archived, on-demand data.
Features of the three types of sessions include the following:
LIVE
: For sessions of this type, the HLS media playlist is continually updated with the latest fragments as they become available. We recommend that the media player retrieve a new playlist on a one-second interval. When this type of session is played in a media player, the user interface typically displays a \"live\" notification, with no scrubber control for choosing the position in the playback window to display.
In LIVE
mode, the newest available fragments are included in an HLS media playlist, even if there is a gap between fragments (that is, if a fragment is missing). A gap like this might cause a media player to halt or cause a jump in playback. In this mode, fragments are not added to the HLS media playlist if they are older than the newest fragment in the playlist. If the missing fragment becomes available after a subsequent fragment is added to the playlist, the older fragment is not added, and the gap is not filled.
LIVE_REPLAY
: For sessions of this type, the HLS media playlist is updated similarly to how it is updated for LIVE
mode except that it starts by including fragments from a given start time. Instead of fragments being added as they are ingested, fragments are added as the duration of the next fragment elapses. For example, if the fragments in the session are two seconds long, then a new fragment is added to the media playlist every two seconds. This mode is useful if you want to start playback from the time an event is detected and then continue with live media that has not yet been ingested as of the time of the session creation. It is also useful for streaming previously archived media without being limited by the 1,000 fragment limit in the ON_DEMAND
mode.
ON_DEMAND
: For sessions of this type, the HLS media playlist contains all the fragments for the session, up to the number that is specified in MaxMediaPlaylistFragmentResults
. The playlist must be retrieved only once for each session. When this type of session is played in a media player, the user interface typically displays a scrubber control for choosing the position in the playback window to display.
In all playback modes, if FragmentSelectorType
is PRODUCER_TIMESTAMP
, and if there are multiple fragments with the same start timestamp, the fragment that has the largest fragment number (that is, the newest fragment) is included in the HLS media playlist. The other fragments are not included. Fragments that have different timestamps but have overlapping durations are still included in the HLS media playlist. This can lead to unexpected behavior in the media player.
The default is LIVE
.
The time in seconds until the requested session expires. This value can be between 300 (5 minutes) and 43200 (12 hours).
When a session expires, no new calls to GetHLSMasterPlaylist
, GetHLSMediaPlaylist
, GetMP4InitFragment
, GetMP4MediaFragment
, or GetTSFragment
can be made for that session.
The default is 300 (5 minutes).
" }, "MaxMediaPlaylistFragmentResults": { - "shape": "PageLimit", - "documentation": "The maximum number of fragments that are returned in the HLS media playlists.
When the PlaybackMode
is LIVE
, the most recent fragments are returned up to this value. When the PlaybackMode
is ON_DEMAND
, the oldest fragments are returned, up to this maximum number.
When more fragments are available in a live HLS media playlist, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live HLS media playlist have a minimum of 3 fragments and a maximum of 10 fragments.
The default is 5 fragments if PlaybackMode
is LIVE
or LIVE_REPLAY
, and 1,000 if PlaybackMode
is ON_DEMAND
.
The maximum value of 1,000 fragments corresponds to more than 16 minutes of video on streams with 1-second fragments, and more than 2 1/2 hours of video on streams with 10-second fragments.
" + "shape": "HLSMaxResults", + "documentation": "The maximum number of fragments that are returned in the HLS media playlists.
When the PlaybackMode
is LIVE
, the most recent fragments are returned up to this value. When the PlaybackMode
is ON_DEMAND
, the oldest fragments are returned, up to this maximum number.
When more fragments are available in a live HLS media playlist, video players often buffer content before starting playback. Increasing the buffer size increases the playback latency, but it decreases the likelihood that rebuffering will occur during playback. We recommend that a live HLS media playlist have a minimum of 3 fragments and a maximum of 10 fragments.
The default is 5 fragments if PlaybackMode
is LIVE
or LIVE_REPLAY
, and 1,000 if PlaybackMode
is ON_DEMAND
.
The maximum value of 5,000 fragments corresponds to more than 80 minutes of video on streams with 1-second fragments, and more than 13 hours of video on streams with 10-second fragments.
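A minimal sketch of requesting a live HLS session with a short playlist, following the 3-10 fragment recommendation above; the endpoint and stream name are hypothetical placeholders:

```javascript
// Sketch: request a live HLS session. Endpoint and stream name are placeholders;
// the endpoint would come from getDataEndpoint ('GET_HLS_STREAMING_SESSION_URL').
const AWS = require('aws-sdk');

const archivedMedia = new AWS.KinesisVideoArchivedMedia({
  endpoint: 'https://b-example.kinesisvideo.us-west-2.amazonaws.com',
  region: 'us-west-2'
});

archivedMedia.getHLSStreamingSessionURL({
  StreamName: 'example-stream',
  PlaybackMode: 'LIVE',
  MaxMediaPlaylistFragmentResults: 5, // a small playlist keeps live latency low
  Expires: 1800
}, (err, data) => {
  if (err) return console.error(err);
  console.log('HLS master playlist URL:', data.HLSStreamingSessionURL);
});
```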
" } } }, @@ -588,6 +593,11 @@ "SERVER_TIMESTAMP" ] }, + "HLSMaxResults": { + "type": "long", + "max": 5000, + "min": 1 + }, "HLSPlaybackMode": { "type": "string", "enum": [ @@ -604,14 +614,14 @@ "members": { "StartTimestamp": { "shape": "Timestamp", - "documentation": "The start of the timestamp range for the requested media.
If the HLSTimestampRange
value is specified, the StartTimestamp
value is required.
This value is inclusive. Fragments that start before the StartTimestamp
and continue past it are included in the session. If FragmentSelectorType
is SERVER_TIMESTAMP
, the StartTimestamp
must be later than the stream head.
The start of the timestamp range for the requested media.
If the HLSTimestampRange
value is specified, the StartTimestamp
value is required.
Only fragments that start exactly at or after StartTimestamp
are included in the session. Fragments that start before StartTimestamp
and continue past it aren't included in the session. If FragmentSelectorType
is SERVER_TIMESTAMP
, the StartTimestamp
must be later than the stream head.
The end of the timestamp range for the requested media. This value must be within 3 hours of the specified StartTimestamp
, and it must be later than the StartTimestamp
value.
If FragmentSelectorType
for the request is SERVER_TIMESTAMP
, this value must be in the past.
The EndTimestamp
value is required for ON_DEMAND
mode, but optional for LIVE_REPLAY
mode. If the EndTimestamp
is not set for LIVE_REPLAY
mode then the session will continue to include newly ingested fragments until the session expires.
This value is inclusive. The EndTimestamp
is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp
value and continue past it are included in the session.
The end of the timestamp range for the requested media. This value must be within 24 hours of the specified StartTimestamp
, and it must be later than the StartTimestamp
value.
If FragmentSelectorType
for the request is SERVER_TIMESTAMP
, this value must be in the past.
The EndTimestamp
value is required for ON_DEMAND
mode, but optional for LIVE_REPLAY
mode. If the EndTimestamp
is not set for LIVE_REPLAY
mode then the session will continue to include newly ingested fragments until the session expires.
This value is inclusive. The EndTimestamp
is compared to the (starting) timestamp of the fragment. Fragments that start before the EndTimestamp
value and continue past it are included in the session.
The start and end of the timestamp range for the requested media.
This value should not be present if PlaybackType
is LIVE
.
The values in the HLSTimestampRange
are inclusive. Fragments that begin before the start time but continue past it, or fragments that begin before the end time but continue past it, are included in the session.
The start and end of the timestamp range for the requested media.
This value should not be present if PlaybackType
is LIVE
.
The Amazon Resource Name (ARN) of the stream from which to retrieve a fragment list. Specify either this parameter or the StreamName
parameter.
The total number of fragments to return. If the total number of fragments available is more than the value specified in max-results
, then a ListFragmentsOutput$NextToken is provided in the output that you can use to resume pagination.
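For illustration, a sketch of the pagination loop this describes, collecting every fragment in a one-hour window; the endpoint and stream name are hypothetical placeholders:

```javascript
// Sketch: page through all fragments in a time window using NextToken.
const AWS = require('aws-sdk');

const archivedMedia = new AWS.KinesisVideoArchivedMedia({
  endpoint: 'https://b-example.kinesisvideo.us-west-2.amazonaws.com',
  region: 'us-west-2'
});

async function listAllFragments() {
  const fragments = [];
  let nextToken;
  do {
    const page = await archivedMedia.listFragments({
      StreamName: 'example-stream',
      MaxResults: 1000,
      NextToken: nextToken,
      FragmentSelector: {
        FragmentSelectorType: 'SERVER_TIMESTAMP',
        TimestampRange: {
          StartTimestamp: new Date(Date.now() - 60 * 60 * 1000),
          EndTimestamp: new Date()
        }
      }
    }).promise();
    fragments.push(...page.Fragments);
    nextToken = page.NextToken; // absent on the last page
  } while (nextToken);
  return fragments;
}
```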
Returns a list of Lambda functions, with the version-specific configuration of each. Lambda returns up to 50 functions per call.
Set FunctionVersion
to ALL
to include all published versions of each function in addition to the unpublished version. To get more information about a function or version, use GetFunction.
Returns a list of Lambda functions, with the version-specific configuration of each. Lambda returns up to 50 functions per call.
Set FunctionVersion
to ALL
to include all published versions of each function in addition to the unpublished version.
The ListFunctions
action returns a subset of the FunctionConfiguration fields. To get the additional fields (State, StateReasonCode, StateReason, LastUpdateStatus, LastUpdateStatusReason, LastUpdateStatusReasonCode) for a function or version, use GetFunction.
The maximum number of functions to return.
", + "documentation": "The maximum number of functions to return in the response. Note that ListFunctions
returns a maximum of 50 items in each response, even if you set the number higher.
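A minimal sketch of the listing behavior described above, collecting every function and all of its published versions; region and names are assumptions:

```javascript
// Sketch: follow NextMarker until the function listing is exhausted.
const AWS = require('aws-sdk');

const lambda = new AWS.Lambda({ region: 'us-east-1' });

async function listAllFunctionVersions() {
  const functions = [];
  let marker;
  do {
    const page = await lambda.listFunctions({
      FunctionVersion: 'ALL',
      MaxItems: 50, // values above 50 are still capped at 50 per response
      Marker: marker
    }).promise();
    functions.push(...page.Functions);
    marker = page.NextMarker;
  } while (marker);
  return functions;
}
```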
This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.
To verify that all parts have been removed, so you don't get charged for the part storage, you should call the ListParts operation and ensure that the parts list is empty.
For information about permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
The following operations are related to AbortMultipartUpload
:
This action aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.
To verify that all parts have been removed, so you don't get charged for the part storage, you should call the ListParts action and ensure that the parts list is empty.
For information about permissions required to use the multipart upload, see Multipart Upload and Permissions.
The following operations are related to AbortMultipartUpload
:
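For illustration, a sketch of aborting an upload and then verifying with ListParts, as the passage above recommends; the bucket, key, and upload ID are hypothetical placeholders:

```javascript
// Sketch: abort an in-progress multipart upload, then confirm nothing is
// left to be billed. All identifiers are placeholders.
const AWS = require('aws-sdk');

const s3 = new AWS.S3({ region: 'us-east-1' });
const params = {
  Bucket: 'example-bucket',
  Key: 'large-object.bin',
  UploadId: 'example-upload-id'
};

s3.abortMultipartUpload(params, (err) => {
  if (err) return console.error(err);
  s3.listParts(params, (err, data) => {
    if (err && err.code === 'NoSuchUpload') {
      return console.log('Upload fully aborted.');
    }
    if (err) return console.error(err);
    // Parts may remain if uploads were in flight; abort again if so.
    console.log(data.Parts.length, 'part(s) still stored.');
  });
});
```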
Completes a multipart upload by assembling previously uploaded parts.
You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts of an upload, you call this operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This operation concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the ETag
value, returned after that part was uploaded.
Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. Because a request could fail after the initial 200 OK response has been sent, it is important that you check the response body to determine whether the request succeeded.
Note that if CompleteMultipartUpload
fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload.
For information about permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
CompleteMultipartUpload
has the following special errors:
Error code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.
400 Bad Request
Error code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.
400 Bad Request
Error code: InvalidPartOrder
Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.
400 Bad Request
Error code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
404 Not Found
The following operations are related to CompleteMultipartUpload
:
Completes a multipart upload by assembling previously uploaded parts.
You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts of an upload, you call this action to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This action concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the ETag
value, returned after that part was uploaded.
Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. Because a request could fail after the initial 200 OK response has been sent, it is important that you check the response body to determine whether the request succeeded.
Note that if CompleteMultipartUpload
fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload.
For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions.
CompleteMultipartUpload
has the following special errors:
Error code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.
400 Bad Request
Error code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.
400 Bad Request
Error code: InvalidPartOrder
Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.
400 Bad Request
Error code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
404 Not Found
The following operations are related to CompleteMultipartUpload
:
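A minimal sketch of the completion step described above; the ETags stand in for values returned by earlier UploadPart calls, and all identifiers are hypothetical placeholders:

```javascript
// Sketch: assemble previously uploaded parts into the final object.
const AWS = require('aws-sdk');

const s3 = new AWS.S3({ region: 'us-east-1' });

s3.completeMultipartUpload({
  Bucket: 'example-bucket',
  Key: 'large-object.bin',
  UploadId: 'example-upload-id',
  MultipartUpload: {
    Parts: [
      // Must be in ascending part-number order, or InvalidPartOrder is returned.
      { PartNumber: 1, ETag: '"etag-from-uploadPart-1"' },
      { PartNumber: 2, ETag: '"etag-from-uploadPart-2"' }
    ]
  }
}, (err, data) => {
  if (err) return console.error(err); // retry failed completes, per the note above
  console.log('Assembled object at', data.Location);
});
```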
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You can create a copy of an object of up to 5 GB in size in a single atomic operation using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API.
All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy operation starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK
response. This means that a 200 OK
response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.
If the copy is successful, you receive a response with information about the copied object.
If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.
The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request
error. For more information, see Transfer Acceleration.
Metadata
When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.
To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive
header. When you grant permissions, you can use the s3:x-amz-metadata-directive
condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.
x-amz-copy-source-if
Headers
To only copy an object under certain conditions, such as whether the Etag
matches or whether the object was modified before or after a specified date, use the following request parameters:
x-amz-copy-source-if-match
x-amz-copy-source-if-none-match
x-amz-copy-source-if-unmodified-since
x-amz-copy-source-if-modified-since
If both the x-amz-copy-source-if-match
and x-amz-copy-source-if-unmodified-since
headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK
and copies the data:
x-amz-copy-source-if-match
condition evaluates to true
x-amz-copy-source-if-unmodified-since
condition evaluates to false
If both the x-amz-copy-source-if-none-match
and x-amz-copy-source-if-modified-since
headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed
response code:
x-amz-copy-source-if-none-match
condition evaluates to false
x-amz-copy-source-if-modified-since
condition evaluates to true
All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed.
Server-side encryption
When you perform a CopyObject operation, you can optionally use the appropriate encryption-related headers to encrypt the object using server-side encryption with AWS managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption.
If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.
Access Control List (ACL)-Specific Request Headers
When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.
Storage Class Options
You can use the CopyObject
operation to change the storage class of an object that is already stored in Amazon S3 using the StorageClass
parameter. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.
Versioning
By default, x-amz-copy-source
identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId
subresource.
If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id
response header in the response.
If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.
If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject.
The following operations are related to CopyObject
:
For more information, see Copying Objects.
", + "documentation": "Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You can create a copy of an object of up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API.
All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK
response. This means that a 200 OK
response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately.
If the copy is successful, you receive a response with information about the copied object.
If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.
The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request
error. For more information, see Transfer Acceleration.
Metadata
When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.
To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive
header. When you grant permissions, you can use the s3:x-amz-metadata-directive
condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.
x-amz-copy-source-if
Headers
To only copy an object under certain conditions, such as whether the Etag
matches or whether the object was modified before or after a specified date, use the following request parameters:
x-amz-copy-source-if-match
x-amz-copy-source-if-none-match
x-amz-copy-source-if-unmodified-since
x-amz-copy-source-if-modified-since
If both the x-amz-copy-source-if-match
and x-amz-copy-source-if-unmodified-since
headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK
and copies the data:
x-amz-copy-source-if-match
condition evaluates to true
x-amz-copy-source-if-unmodified-since
condition evaluates to false
If both the x-amz-copy-source-if-none-match
and x-amz-copy-source-if-modified-since
headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed
response code:
x-amz-copy-source-if-none-match
condition evaluates to false
x-amz-copy-source-if-modified-since
condition evaluates to true
All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed.
Server-side encryption
When you perform a CopyObject operation, you can optionally use the appropriate encryption-related headers to encrypt the object using server-side encryption with AWS managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption.
If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service User Guide.
Access Control List (ACL)-Specific Request Headers
When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.
Storage Class Options
You can use the CopyObject
action to change the storage class of an object that is already stored in Amazon S3 using the StorageClass
parameter. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.
Versioning
By default, x-amz-copy-source
identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId
subresource.
If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id
response header in the response.
If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.
If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject.
The following operations are related to CopyObject
:
For more information, see Copying Objects.
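For illustration, a sketch of a server-side copy that replaces metadata, changes the storage class, and applies one of the x-amz-copy-source-if conditions discussed above; all bucket and key names are hypothetical placeholders:

```javascript
// Sketch: server-side copy with replaced metadata and a copy condition.
const AWS = require('aws-sdk');

const s3 = new AWS.S3({ region: 'us-east-1' });

s3.copyObject({
  Bucket: 'destination-bucket',
  Key: 'copied-object.bin',
  CopySource: 'source-bucket/' + encodeURIComponent('original-object.bin'),
  MetadataDirective: 'REPLACE',              // x-amz-metadata-directive
  Metadata: { project: 'example' },
  StorageClass: 'STANDARD_IA',
  CopySourceIfUnmodifiedSince: new Date('2021-01-01T00:00:00Z')
}, (err, data) => {
  // Errors can also arrive embedded in a 200 OK body; the SDK generally
  // surfaces those as err here as well.
  if (err) return console.error(err);
  console.log('Copied; new ETag:', data.CopyObjectResult.ETag);
});
```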
", "alias": "PutObjectCopy" }, "CreateBucket": { @@ -107,7 +107,7 @@ "shape": "CreateMultipartUploadOutput" }, "documentationUrl": "http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadInitiate.html", - "documentation": "This operation initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.
For more information about multipart uploads, see Multipart Upload Overview.
If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort operation and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.
For information about the permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (AWS Signature Version 4).
After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stop charging you for storing them only after you either complete or abort a multipart upload.
You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload
.
To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the kms:Encrypt
, kms:Decrypt
, kms:ReEncrypt*
, kms:GenerateDataKey*
, and kms:DescribeKey
actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload.
If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.
For more information, see Protecting Data Using Server-Side Encryption.
When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:
Specify a canned ACL with the x-amz-acl
request header. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.
Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.
x-amz-server-side-encryption
x-amz-server-side-encryption-aws-kms-key-id
x-amz-server-side-encryption-context
If you specify x-amz-server-side-encryption:aws:kms
, but don't provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.
All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.
For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.
Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.
You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:
Specify a canned ACL (x-amz-acl
) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.
Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:
x-amz-grant-read
x-amz-grant-write
x-amz-grant-read-acp
x-amz-grant-write-acp
x-amz-grant-full-control
You specify each grantee as a type=value pair, where the type is one of the following:
id
– if the value specified is the canonical user ID of an AWS account
uri
– if you are granting permissions to a predefined group
emailAddress
– if the value specified is the email address of an AWS account
Using email addresses to specify a grantee is only supported in the following AWS Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
For example, the following x-amz-grant-read
header grants the AWS accounts identified by account IDs permissions to read object data and its metadata:
x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"
The following operations are related to CreateMultipartUpload
:
This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.
For more information about multipart uploads, see Multipart Upload Overview.
If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.
For information about the permissions required to use the multipart upload API, see Multipart Upload and Permissions.
For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (AWS Signature Version 4).
After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.
You can optionally request server-side encryption. For server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can provide your own encryption key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload
.
To perform a multipart upload with encryption using an AWS KMS CMK, the requester must have permission to the kms:Encrypt
, kms:Decrypt
, kms:ReEncrypt*
, kms:GenerateDataKey*
, and kms:DescribeKey
actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload.
If your AWS Identity and Access Management (IAM) user or role is in the same AWS account as the AWS KMS CMK, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.
For more information, see Protecting Data Using Server-Side Encryption.
When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:
Specify a canned ACL with the x-amz-acl
request header. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. The option you use depends on whether you want to use AWS managed encryption keys or provide your own encryption key.
Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used to encrypt data, specify the following headers in the request.
x-amz-server-side-encryption
x-amz-server-side-encryption-aws-kms-key-id
x-amz-server-side-encryption-context
If you specify x-amz-server-side-encryption:aws:kms
, but don't provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the AWS managed CMK in AWS KMS to protect the data.
All GET and PUT requests for an object protected by AWS KMS fail if you don't make them with SSL or by using SigV4.
For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.
Use customer-provided encryption keys – If you want to manage your own encryption keys, provide all the following headers in the request.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS.
You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:
Specify a canned ACL (x-amz-acl
) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.
Specify access permissions explicitly — To explicitly grant access permissions to specific AWS accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:
x-amz-grant-read
x-amz-grant-write
x-amz-grant-read-acp
x-amz-grant-write-acp
x-amz-grant-full-control
You specify each grantee as a type=value pair, where the type is one of the following:
id
– if the value specified is the canonical user ID of an AWS account
uri
– if you are granting permissions to a predefined group
emailAddress
– if the value specified is the email address of an AWS account
Using email addresses to specify a grantee is only supported in the following AWS Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
For example, the following x-amz-grant-read
header grants the AWS accounts identified by account IDs permissions to read object data and its metadata:
x-amz-grant-read: id=\"111122223333\", id=\"444455556666\"
The following operations are related to CreateMultipartUpload
:
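For SDK users, here is a minimal sketch of how the headers above surface as parameters on the createMultipartUpload client method in the AWS SDK for JavaScript that this model drives; the bucket, key, and KMS key ARN are placeholders, not values taken from this model.

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

// Initiate a multipart upload with SSE-KMS and a canned ACL.
// ACL maps to x-amz-acl; ServerSideEncryption and SSEKMSKeyId map to the
// x-amz-server-side-encryption and x-amz-server-side-encryption-aws-kms-key-id headers.
// (Use either a canned ACL or the Grant* parameters, not both, as noted above.)
s3.createMultipartUpload({
  Bucket: 'my-bucket',                                            // placeholder
  Key: 'photos/2006/February/sample.jpg',
  ACL: 'bucket-owner-full-control',
  ServerSideEncryption: 'aws:kms',
  SSEKMSKeyId: 'arn:aws:kms:us-east-1:111122223333:key/EXAMPLE'   // placeholder
}).promise()
  .then(({ UploadId }) => console.log('upload id:', UploadId));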
Deletes the cors
configuration information set for the bucket.
To use this operation, you must have permission to perform the s3:PutBucketCORS
action. The bucket owner has this permission by default and can grant this permission to others.
For information about cors
, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.
Related Resources:
" + "documentation": "Deletes the cors
configuration information set for the bucket.
To use this operation, you must have permission to perform the s3:PutBucketCORS
action. The bucket owner has this permission by default and can grant this permission to others.
For information about cors
, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service User Guide.
Related Resources:
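As a brief illustration (a sketch, not part of the model file), the corresponding call in the JavaScript SDK is a one-liner; the bucket name is a placeholder.

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

// Requires the s3:PutBucketCORS permission described above.
s3.deleteBucketCors({ Bucket: 'my-bucket' }).promise()
  .then(() => console.log('cors configuration removed'));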
" }, "DeleteBucketEncryption": { "name": "DeleteBucketEncryption", @@ -158,7 +158,7 @@ "input": { "shape": "DeleteBucketEncryptionRequest" }, - "documentation": "This implementation of the DELETE operation removes default encryption from the bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service Developer Guide.
To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
Related Resources
" + "documentation": "This implementation of the DELETE action removes default encryption from the bucket. For information about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption in the Amazon Simple Storage Service User Guide.
To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.
Related Resources
" }, "DeleteBucketIntelligentTieringConfiguration": { "name": "DeleteBucketIntelligentTieringConfiguration", @@ -232,7 +232,7 @@ "shape": "DeleteBucketPolicyRequest" }, "documentationUrl": "http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html", - "documentation": "This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the DeleteBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account to use this operation.
If you don't have DeleteBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and UserPolicies.
The following operations are related to DeleteBucketPolicy
This implementation of the DELETE action uses the policy subresource to delete the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the DeleteBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account to use this operation.
If you don't have DeleteBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
The following operations are related to DeleteBucketPolicy
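A minimal sketch of the call and the two error cases described above, using the JavaScript SDK; the bucket name is a placeholder.

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.deleteBucketPolicy({ Bucket: 'my-bucket' }).promise()
  .then(() => console.log('bucket policy deleted'))
  .catch(err => {
    // 403 Access Denied: the caller lacks DeleteBucketPolicy permissions.
    // 405 Method Not Allowed: correct permissions, but the identity does not
    // belong to the bucket owner's account.
    console.error(err.statusCode, err.code);
  });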
Deletes the replication configuration from the bucket.
To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration
action. The bucket owner has these permissions by default and can grant it to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
It can take a while for the deletion of a replication configuration to fully propagate.
For information about replication configuration, see Replication in the Amazon S3 Developer Guide.
The following operations are related to DeleteBucketReplication
:
Deletes the replication configuration from the bucket.
To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration
action. The bucket owner has these permissions by default and can grant them to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
It can take a while for the deletion of a replication configuration to fully propagate.
For information about replication configuration, see Replication in the Amazon S3 Developer Guide.
The following operations are related to DeleteBucketReplication
:
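Sketched with the JavaScript SDK (the bucket name is a placeholder); note the propagation delay called out above.

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

// Requires s3:PutReplicationConfiguration; the deletion can take a while to propagate.
s3.deleteBucketReplication({ Bucket: 'my-bucket' }).promise()
  .then(() => console.log('replication configuration deleted'));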
This operation removes the website configuration for a bucket. Amazon S3 returns a 200 OK
response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK
response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404
response if the bucket specified in the request does not exist.
This DELETE operation requires the S3:DeleteBucketWebsite
permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite
permission.
For more information about hosting websites, see Hosting Websites on Amazon S3.
The following operations are related to DeleteBucketWebsite
:
This action removes the website configuration for a bucket. Amazon S3 returns a 200 OK
response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK
response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404
response if the bucket specified in the request does not exist.
This DELETE action requires the S3:DeleteBucketWebsite
permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite
permission.
For more information about hosting websites, see Hosting Websites on Amazon S3.
The following operations are related to DeleteBucketWebsite
:
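A one-line sketch with the JavaScript SDK; the bucket name is a placeholder.

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

// Requires S3:DeleteBucketWebsite; a successful delete returns 200 OK with no body.
s3.deleteBucketWebsite({ Bucket: 'my-bucket' }).promise()
  .then(() => console.log('website configuration removed'));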
Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.
To remove a specific version, you must be the bucket owner and you must use the version Id subresource. Using this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header, x-amz-delete-marker
, to true.
If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa
request header in the DELETE versionId
request. Requests that include x-amz-mfa
must use HTTPS.
For more information about MFA Delete, see Using MFA Delete. To see sample requests that use versioning, see Sample Request.
You can delete objects by explicitly calling the DELETE Object API or configure its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject
, s3:DeleteObjectVersion
, and s3:PutLifeCycleConfiguration
actions.
The following operation is related to DeleteObject
:
Removes the null version (if there is one) of an object and inserts a delete marker, which becomes the latest version of the object. If there isn't a null version, Amazon S3 does not remove any objects.
To remove a specific version, you must be the bucket owner and you must use the version Id subresource. Using this subresource permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header, x-amz-delete-marker
, to true.
If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa
request header in the DELETE versionId
request. Requests that include x-amz-mfa
must use HTTPS.
For more information about MFA Delete, see Using MFA Delete. To see sample requests that use versioning, see Sample Request.
You can delete objects by explicitly calling DELETE Object or configure its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject
, s3:DeleteObjectVersion
, and s3:PutLifeCycleConfiguration
actions.
The following action is related to DeleteObject
:
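The two modes described above (delete-marker insertion versus permanent version deletion with MFA) sketch out as follows in the JavaScript SDK; bucket, key, version ID, and MFA serial are placeholders.

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

// In a versioned bucket this inserts a delete marker rather than removing data.
s3.deleteObject({ Bucket: 'my-bucket', Key: 'sample.jpg' }).promise();

// Permanently delete one version. MFA is "device-serial space token-code" and is
// required only when the bucket has MFA Delete enabled (the request must use HTTPS).
s3.deleteObject({
  Bucket: 'my-bucket',
  Key: 'sample.jpg',
  VersionId: 'EXAMPLE-VERSION-ID',                    // placeholder
  MFA: 'arn:aws:iam::111122223333:mfa/user 123456'    // placeholder
}).promise();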
This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.
The request contains a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success, or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.
The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion, the operation does not return any information about the delete in the response body.
When performing this operation on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete.
Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.
The following operations are related to DeleteObjects
:
This action enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this action provides a suitable alternative to sending individual delete requests, reducing per-request overhead.
The request contains a list of up to 1,000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete action and returns the result of that delete (success or failure) in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.
The action supports two modes for the response: verbose and quiet. By default, the action uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete action encountered an error. For a successful deletion, the action does not return any information about the delete in the response body.
When you perform this action on an MFA delete-enabled bucket and attempt to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete.
Finally, the Content-MD5 header is required for all Multi-Object Delete requests. Amazon S3 uses the header value to ensure that your request body has not been altered in transit.
The following operations are related to DeleteObjects
:
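A sketch of a quiet-mode Multi-Object Delete with the JavaScript SDK; the v2 SDK computes the required Content-MD5 header for you. Names are placeholders.

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.deleteObjects({
  Bucket: 'my-bucket',
  Delete: {
    Objects: [
      { Key: 'sample1.jpg' },
      { Key: 'sample2.jpg', VersionId: 'EXAMPLE-VERSION-ID' } // delete a specific version
    ],
    Quiet: true // quiet mode: the response lists only the keys that failed to delete
  }
}).promise()
  .then(({ Errors }) => (Errors || []).forEach(e => console.error(e.Key, e.Code)));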
This implementation of the GET operation uses the accelerate
subresource to return the Transfer Acceleration state of a bucket, which is either Enabled
or Suspended
. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.
To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
You set the Transfer Acceleration state of an existing bucket to Enabled
or Suspended
by using the PutBucketAccelerateConfiguration operation.
A GET accelerate
request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.
For more information about transfer acceleration, see Transfer Acceleration in the Amazon Simple Storage Service Developer Guide.
Related Resources
" + "documentation": "This implementation of the GET action uses the accelerate
subresource to return the Transfer Acceleration state of a bucket, which is either Enabled
or Suspended
. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.
To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.
You set the Transfer Acceleration state of an existing bucket to Enabled
or Suspended
by using the PutBucketAccelerateConfiguration operation.
A GET accelerate
request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.
For more information about transfer acceleration, see Transfer Acceleration in the Amazon Simple Storage Service User Guide.
Related Resources
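A sketch of reading the state with the JavaScript SDK, including the never-configured case noted above; the bucket name is a placeholder.

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.getBucketAccelerateConfiguration({ Bucket: 'my-bucket' }).promise()
  .then(({ Status }) => {
    // Status is 'Enabled' or 'Suspended'; it is absent when acceleration was never set.
    console.log('transfer acceleration:', Status || 'never configured');
  });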
" }, "GetBucketAcl": { "name": "GetBucketAcl", @@ -359,7 +359,7 @@ "shape": "GetBucketAclOutput" }, "documentationUrl": "http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGETacl.html", - "documentation": "This implementation of the GET
operation uses the acl
subresource to return the access control list (ACL) of a bucket. To use GET
to return the ACL of the bucket, you must have READ_ACP
access to the bucket. If READ_ACP
permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.
Related Resources
" + "documentation": "This implementation of the GET
action uses the acl
subresource to return the access control list (ACL) of a bucket. To use GET
to return the ACL of the bucket, you must have READ_ACP
access to the bucket. If READ_ACP
permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.
Related Resources
" }, "GetBucketAnalyticsConfiguration": { "name": "GetBucketAnalyticsConfiguration", @@ -373,7 +373,7 @@ "output": { "shape": "GetBucketAnalyticsConfigurationOutput" }, - "documentation": "This implementation of the GET operation returns an analytics configuration (identified by the analytics configuration ID) from the bucket.
To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon Simple Storage Service Developer Guide.
Related Resources
This implementation of the GET action returns an analytics configuration (identified by the analytics configuration ID) from the bucket.
To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.
For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis in the Amazon Simple Storage Service User Guide.
Related Resources
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The response describes the new filter element that you can use to specify a filter to select a subset of objects to which the rule applies. If you are using a previous version of the lifecycle configuration, it still works. For the earlier API description, see GetBucketLifecycle.
Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.
To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration
action. The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
GetBucketLifecycleConfiguration
has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to GetBucketLifecycleConfiguration
:
Bucket lifecycle configuration now supports specifying a lifecycle rule using an object key name prefix, one or more object tags, or a combination of both. Accordingly, this section describes the latest API. The response describes the new filter element that you can use to specify a filter to select a subset of objects to which the rule applies. If you are using a previous version of the lifecycle configuration, it still works. For the earlier action, see GetBucketLifecycle.
Returns the lifecycle configuration information set on the bucket. For information about lifecycle configuration, see Object Lifecycle Management.
To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration
action. The bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
GetBucketLifecycleConfiguration
has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to GetBucketLifecycleConfiguration
:
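The special NoSuchLifecycleConfiguration error above can be handled explicitly; a sketch with the JavaScript SDK (the bucket name is a placeholder).

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.getBucketLifecycleConfiguration({ Bucket: 'my-bucket' }).promise()
  .then(({ Rules }) => console.log('lifecycle rules:', Rules.length))
  .catch(err => {
    if (err.code === 'NoSuchLifecycleConfiguration') {
      console.log('no lifecycle configuration set on this bucket'); // 404 Not Found
    } else {
      throw err;
    }
  });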
Returns the notification configuration of a bucket.
If notifications are not enabled on the bucket, the operation returns an empty NotificationConfiguration
element.
By default, you must be the bucket owner to read the notification configuration of a bucket. However, the bucket owner can use a bucket policy to grant permission to other users to read this configuration with the s3:GetBucketNotification
permission.
For more information about setting and reading the notification configuration on a bucket, see Setting Up Notification of Bucket Events. For more information about bucket policies, see Using Bucket Policies.
The following operation is related to GetBucketNotification
:
Returns the notification configuration of a bucket.
If notifications are not enabled on the bucket, the action returns an empty NotificationConfiguration
element.
By default, you must be the bucket owner to read the notification configuration of a bucket. However, the bucket owner can use a bucket policy to grant permission to other users to read this configuration with the s3:GetBucketNotification
permission.
For more information about setting and reading the notification configuration on a bucket, see Setting Up Notification of Bucket Events. For more information about bucket policies, see Using Bucket Policies.
The following action is related to GetBucketNotification
:
Returns the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have GetBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
The following operation is related to GetBucketPolicy
:
Returns the policy of a specified bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have GetBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
The following action is related to GetBucketPolicy
:
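A sketch with the JavaScript SDK; the returned Policy field is the policy document as a JSON string. The bucket name is a placeholder.

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.getBucketPolicy({ Bucket: 'my-bucket' }).promise()
  .then(({ Policy }) => console.log(JSON.parse(Policy))); // parse the JSON policy text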
Returns the replication configuration of a bucket.
It can take a while to propagate the put or delete a replication configuration to all Amazon S3 systems. Therefore, a get request soon after put or delete can return a wrong result.
For information about replication configuration, see Replication in the Amazon Simple Storage Service Developer Guide.
This operation requires permissions for the s3:GetReplicationConfiguration
action. For more information about permissions, see Using Bucket Policies and User Policies.
If you include the Filter
element in a replication configuration, you must also include the DeleteMarkerReplication
and Priority
elements. The response also returns those elements.
For information about GetBucketReplication
errors, see List of replication-related error codes
The following operations are related to GetBucketReplication
:
Returns the replication configuration of a bucket.
It can take a while for the put or delete of a replication configuration to propagate to all Amazon S3 systems. Therefore, a get request soon after a put or delete can return an outdated result.
For information about replication configuration, see Replication in the Amazon Simple Storage Service User Guide.
This action requires permissions for the s3:GetReplicationConfiguration
action. For more information about permissions, see Using Bucket Policies and User Policies.
If you include the Filter
element in a replication configuration, you must also include the DeleteMarkerReplication
and Priority
elements. The response also returns those elements.
For information about GetBucketReplication
errors, see List of replication-related error codes
The following operations are related to GetBucketReplication
:
Returns the website configuration for a bucket. To host website on Amazon S3, you can configure a bucket as website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.
This GET operation requires the S3:GetBucketWebsite
permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy granting them the S3:GetBucketWebsite
permission.
The following operations are related to DeleteBucketWebsite
:
Returns the website configuration for a bucket. To host a website on Amazon S3, you can configure a bucket as a website by adding a website configuration. For more information about hosting websites, see Hosting Websites on Amazon S3.
This GET action requires the S3:GetBucketWebsite
permission. By default, only the bucket owner can read the bucket website configuration. However, bucket owners can allow other users to read the website configuration by writing a bucket policy granting them the S3:GetBucketWebsite
permission.
The following operations are related to DeleteBucketWebsite
:
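Sketched with the JavaScript SDK (the bucket name is a placeholder):

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

// Requires S3:GetBucketWebsite, as described above.
s3.getBucketWebsite({ Bucket: 'my-bucket' }).promise()
  .then(cfg => console.log(cfg.IndexDocument, cfg.ErrorDocument));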
Retrieves objects from Amazon S3. To use GET
, you must have READ
access to the object. If you grant READ
access to the anonymous user, you can return the object without using an authorization header.
An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg
, you can name it photos/2006/February/sample.jpg
.
To get an object from such a logical hierarchy, specify the full key name for the object in the GET
operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg
, specify the resource as /photos/2006/February/sample.jpg
. For a path-style request example, if you have the object photos/2006/February/sample.jpg
in the bucket named examplebucket
, specify the resource as /examplebucket/photos/2006/February/sample.jpg
. For more information about request types, see HTTP Host Header Bucket Specification.
To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.
If the object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectStateError
error. For information about restoring archived objects, see Restoring Archived Objects.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging
action), the response also returns the x-amz-tagging-count
header that provides the count of number of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.
Permissions
You need the s3:GetObject
permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 will return an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 will return an HTTP status code 403 (\"access denied\") error.
Versioning
By default, the GET operation returns the current version of an object. To return a different version, use the versionId
subresource.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true
in the response.
For more information about versioning, see PutBucketVersioning.
Overriding Response Header Values
There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.
You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type
, Content-Language
, Expires
, Cache-Control
, Content-Disposition
, and Content-Encoding
. To override these header values in the GET response, you use the following request parameters.
You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.
response-content-type
response-content-language
response-expires
response-cache-control
response-content-disposition
response-content-encoding
Additional Considerations about Request Headers
If both of the If-Match
and If-Unmodified-Since
headers are present in the request as follows: If-Match
condition evaluates to true
, and; If-Unmodified-Since
condition evaluates to false
; then, S3 returns 200 OK and the data requested.
If both of the If-None-Match
and If-Modified-Since
headers are present in the request as follows: If-None-Match
condition evaluates to false
, and; If-Modified-Since
condition evaluates to true
; then, S3 returns 304 Not Modified response code.
For more information about conditional requests, see RFC 7232.
The following operations are related to GetObject
:
Retrieves objects from Amazon S3. To use GET
, you must have READ
access to the object. If you grant READ
access to the anonymous user, you can return the object without using an authorization header.
An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer file system. You can, however, create a logical hierarchy by using object key names that imply a folder structure. For example, instead of naming an object sample.jpg
, you can name it photos/2006/February/sample.jpg
.
To get an object from such a logical hierarchy, specify the full key name for the object in the GET
operation. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg
, specify the resource as /photos/2006/February/sample.jpg
. For a path-style request example, if you have the object photos/2006/February/sample.jpg
in the bucket named examplebucket
, specify the resource as /examplebucket/photos/2006/February/sample.jpg
. For more information about request types, see HTTP Host Header Bucket Specification.
To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.
If the object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this action returns an InvalidObjectStateError
error. For information about restoring archived objects, see Restoring Archived Objects.
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging
action), the response also returns the x-amz-tagging-count
header that provides the number of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.
Permissions
You need the s3:GetObject
permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 will return an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 will return an HTTP status code 403 (\"access denied\") error.
Versioning
By default, the GET action returns the current version of an object. To return a different version, use the versionId
subresource.
If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true
in the response.
For more information about versioning, see PutBucketVersioning.
Overriding Response Header Values
There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.
You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type
, Content-Language
, Expires
, Cache-Control
, Content-Disposition
, and Content-Encoding
. To override these header values in the GET response, you use the following request parameters.
You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.
response-content-type
response-content-language
response-expires
response-cache-control
response-content-disposition
response-content-encoding
Additional Considerations about Request Headers
If both of the If-Match and If-Unmodified-Since headers are present in the request, and the If-Match condition evaluates to true while the If-Unmodified-Since condition evaluates to false, then Amazon S3 returns 200 OK and the data requested.
If both of the If-None-Match and If-Modified-Since headers are present in the request, and the If-None-Match condition evaluates to false while the If-Modified-Since condition evaluates to true, then Amazon S3 returns the 304 Not Modified response code.
For more information about conditional requests, see RFC 7232.
The following operations are related to GetObject
:
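The response-header overrides above map to Response* request parameters in the JavaScript SDK; a sketch, with bucket and key as placeholders.

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.getObject({
  Bucket: 'my-bucket',
  Key: 'photos/2006/February/sample.jpg',
  ResponseContentType: 'application/octet-stream',                  // response-content-type
  ResponseContentDisposition: 'attachment; filename="sample.jpg"'   // response-content-disposition
}).promise()
  .then(({ Body, TagCount }) => {
    // Body is a Buffer in Node.js; TagCount carries the x-amz-tagging-count header.
    console.log('bytes:', Body.length, 'tags:', TagCount);
  });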
Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the object.
To use this operation, you must have permission to perform the s3:GetObjectTagging
action. By default, the GET operation returns information about current version of an object. For a versioned bucket, you can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId query parameter. You also need permission for the s3:GetObjectVersionTagging
action.
By default, the bucket owner has this permission and can grant this permission to others.
For information about the Amazon S3 object tagging feature, see Object Tagging.
The following operation is related to GetObjectTagging
:
Returns the tag-set of an object. You send the GET request against the tagging subresource associated with the object.
To use this operation, you must have permission to perform the s3:GetObjectTagging
action. By default, the GET action returns information about current version of an object. For a versioned bucket, you can have multiple versions of an object in your bucket. To retrieve tags of any other version, use the versionId query parameter. You also need permission for the s3:GetObjectVersionTagging
action.
By default, the bucket owner has this permission and can grant this permission to others.
For information about the Amazon S3 object tagging feature, see Object Tagging.
The following action is related to GetObjectTagging
:
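A sketch with the JavaScript SDK; omit VersionId to read the current version's tags. Names are placeholders.

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.getObjectTagging({
  Bucket: 'my-bucket',
  Key: 'sample.jpg',
  VersionId: 'EXAMPLE-VERSION-ID' // optional; requires s3:GetObjectVersionTagging
}).promise()
  .then(({ TagSet }) => TagSet.forEach(t => console.log(t.Key, '=', t.Value)));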
Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For more information about BitTorrent, see Using BitTorrent with Amazon S3.
You can get torrent only for objects that are less than 5 GB in size, and that are not encrypted using server-side encryption with a customer-provided encryption key.
To use GET, you must have READ access to the object.
This action is not supported by Amazon S3 on Outposts.
The following operation is related to GetObjectTorrent
:
Returns torrent files from a bucket. BitTorrent can save you bandwidth when you're distributing large files. For more information about BitTorrent, see Using BitTorrent with Amazon S3.
You can get a torrent only for objects that are less than 5 GB in size and that are not encrypted using server-side encryption with a customer-provided encryption key.
To use GET, you must have READ access to the object.
This action is not supported by Amazon S3 on Outposts.
The following action is related to GetObjectTorrent
:
This operation is useful to determine if a bucket exists and you have permission to access it. The operation returns a 200 OK
if the bucket exists and you have permission to access it.
If the bucket does not exist or you do not have permission to access it, the HEAD
request returns a generic 404 Not Found
or 403 Forbidden
code. A message body is not included, so you cannot determine the exception beyond these error codes.
To use this operation, you must have permissions to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
This action is useful to determine if a bucket exists and you have permission to access it. The action returns a 200 OK
if the bucket exists and you have permission to access it.
If the bucket does not exist or you do not have permission to access it, the HEAD
request returns a generic 404 Not Found
or 403 Forbidden
code. A message body is not included, so you cannot determine the exception beyond these error codes.
To use this operation, you must have permissions to perform the s3:ListBucket
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
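Because only generic status codes come back, a sketch of an existence check with the JavaScript SDK inspects err.statusCode; the bucket name is a placeholder.

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.headBucket({ Bucket: 'my-bucket' }).promise()
  .then(() => console.log('bucket exists and is accessible'))
  .catch(err => {
    // No message body is returned: 404 Not Found or 403 Forbidden is all you get.
    console.error('headBucket failed with HTTP', err.statusCode);
  });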
The HEAD operation retrieves metadata from an object without returning the object itself. This operation is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.
A HEAD
request has the same options as a GET
operation on an object. The response is identical to the GET
response except that there is no response body. Because of this, if the HEAD
request generates an error, it returns a generic 404 Not Found
or 403 Forbidden
code. It is not possible to retrieve the exact exception beyond these error codes.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.
The last modified property in this case is the creation date of the object.
Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
Consider the following when using request headers:
Consideration 1 – If both of the If-Match
and If-Unmodified-Since
headers are present in the request as follows:
If-Match
condition evaluates to true
, and;
If-Unmodified-Since
condition evaluates to false
;
Then Amazon S3 returns 200 OK
and the data requested.
Consideration 2 – If both of the If-None-Match
and If-Modified-Since
headers are present in the request as follows:
If-None-Match
condition evaluates to false
, and;
If-Modified-Since
condition evaluates to true
;
Then Amazon S3 returns the 304 Not Modified
response code.
For more information about conditional requests, see RFC 7232.
Permissions
You need the s3:GetObject
permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 (\"access denied\") error.
The following operation is related to HeadObject
:
The HEAD action retrieves metadata from an object without returning the object itself. This action is useful if you're only interested in an object's metadata. To use HEAD, you must have READ access to the object.
A HEAD
request has the same options as a GET
action on an object. The response is identical to the GET
response except that there is no response body. Because of this, if the HEAD
request generates an error, it returns a generic 404 Not Found
or 403 Forbidden
code. It is not possible to retrieve the exact exception beyond these error codes.
If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
Encryption request headers, like x-amz-server-side-encryption
, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.
The last modified property in this case is the creation date of the object.
Request headers are limited to 8 KB in size. For more information, see Common Request Headers.
Consider the following when using request headers:
Consideration 1 – If both of the If-Match and If-Unmodified-Since headers are present in the request, and the If-Match condition evaluates to true while the If-Unmodified-Since condition evaluates to false, then Amazon S3 returns 200 OK and the data requested.
Consideration 2 – If both of the If-None-Match and If-Modified-Since headers are present in the request, and the If-None-Match condition evaluates to false while the If-Modified-Since condition evaluates to true, then Amazon S3 returns the 304 Not Modified response code.
For more information about conditional requests, see RFC 7232.
Permissions
You need the s3:GetObject
permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 returns an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 returns an HTTP status code 403 (\"access denied\") error.
The following action is related to HeadObject
:
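A sketch of a conditional HEAD with the JavaScript SDK, matching Consideration 2 above; in the v2 SDK a 304 surfaces as an error. Names and the date are placeholders.

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.headObject({
  Bucket: 'my-bucket',
  Key: 'sample.jpg',
  IfModifiedSince: new Date('2021-01-01') // maps to the If-Modified-Since header
}).promise()
  .then(({ ContentLength, LastModified, ETag }) =>
    console.log(ContentLength, LastModified, ETag))
  .catch(err => {
    if (err.statusCode === 304) console.log('not modified since the given date');
    else throw err;
  });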
Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.
This operation supports list pagination and does not return more than 100 configurations at a time. You should always check the IsTruncated
element in the response. If there are no more configurations to list, IsTruncated
is set to false. If there are more configurations to list, IsTruncated
is set to true, and there will be a value in NextContinuationToken
. You use the NextContinuationToken
value to continue the pagination of the list by passing the value in continuation-token in the request to GET
the next page.
To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.
The following operations are related to ListBucketAnalyticsConfigurations
:
Lists the analytics configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.
This action supports list pagination and does not return more than 100 configurations at a time. You should always check the IsTruncated
element in the response. If there are no more configurations to list, IsTruncated
is set to false. If there are more configurations to list, IsTruncated
is set to true, and there will be a value in NextContinuationToken
. You use the NextContinuationToken
value to continue the pagination of the list by passing the value in continuation-token in the request to GET
the next page.
To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.
The following operations are related to ListBucketAnalyticsConfigurations
:
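The continuation-token pagination described above can be driven in a loop; a sketch with the JavaScript SDK.

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

async function listAllAnalyticsConfigurations(bucket) {
  const configs = [];
  let token;
  do {
    const page = await s3.listBucketAnalyticsConfigurations({
      Bucket: bucket,
      ContinuationToken: token // continuation-token from the previous page, if any
    }).promise();
    configs.push(...(page.AnalyticsConfigurationList || []));
    token = page.IsTruncated ? page.NextContinuationToken : undefined;
  } while (token);
  return configs; // at most 100 configurations are returned per page
}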
Returns a list of inventory configurations for the bucket. You can have up to 1,000 analytics configurations per bucket.
This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated
element in the response. If there are no more configurations to list, IsTruncated
is set to false. If there are more configurations to list, IsTruncated
is set to true, and there is a value in NextContinuationToken
. You use the NextContinuationToken
value to continue the pagination of the list by passing the value in continuation-token in the request to GET
the next page.
To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory
The following operations are related to ListBucketInventoryConfigurations
:
Returns a list of inventory configurations for the bucket. You can have up to 1,000 inventory configurations per bucket.
This action supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated
element in the response. If there are no more configurations to list, IsTruncated
is set to false. If there are more configurations to list, IsTruncated
is set to true, and there is a value in NextContinuationToken
. You use the NextContinuationToken
value to continue the pagination of the list by passing the value in continuation-token in the request to GET
the next page.
To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 inventory feature, see Amazon S3 Inventory
The following operations are related to ListBucketInventoryConfigurations
:
Lists the metrics configurations for the bucket. The metrics configurations are only for the request metrics of the bucket and do not provide information on daily storage metrics. You can have up to 1,000 configurations per bucket.
This operation supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated
element in the response. If there are no more configurations to list, IsTruncated
is set to false. If there are more configurations to list, IsTruncated
is set to true, and there is a value in NextContinuationToken
. You use the NextContinuationToken
value to continue the pagination of the list by passing the value in continuation-token
in the request to GET
the next page.
To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For more information about metrics configurations and CloudWatch request metrics, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to ListBucketMetricsConfigurations
:
Lists the metrics configurations for the bucket. The metrics configurations are only for the request metrics of the bucket and do not provide information on daily storage metrics. You can have up to 1,000 configurations per bucket.
This action supports list pagination and does not return more than 100 configurations at a time. Always check the IsTruncated
element in the response. If there are no more configurations to list, IsTruncated
is set to false. If there are more configurations to list, IsTruncated
is set to true, and there is a value in NextContinuationToken
. You use the NextContinuationToken
value to continue the pagination of the list by passing the value in continuation-token
in the request to GET
the next page.
To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration
action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For more information about metrics configurations and CloudWatch request metrics, see Monitoring Metrics with Amazon CloudWatch.
The following operations are related to ListBucketMetricsConfigurations
:
This operation lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted.
This operation returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum number of uploads a response can include, which is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads
parameter in the response. If additional multipart uploads satisfy the list criteria, the response will contain an IsTruncated
element with the value true. To list the additional multipart uploads, use the key-marker
and upload-id-marker
request parameters.
In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted in ascending order within each key by the upload initiation time.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload.
For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
The following operations are related to ListMultipartUploads
:
This action lists in-progress multipart uploads. An in-progress multipart upload is a multipart upload that has been initiated using the Initiate Multipart Upload request, but has not yet been completed or aborted.
This action returns at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the maximum number of uploads a response can include, which is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads parameter in the request. If additional multipart uploads satisfy the list criteria, the response will contain an IsTruncated element with the value true. To list the additional multipart uploads, use the key-marker and upload-id-marker request parameters.
In the response, the uploads are sorted by key. If your application has initiated more than one multipart upload using the same object key, then uploads in the response are first sorted by key. Additionally, uploads are sorted in ascending order within each key by the upload initiation time.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload.
For information on permissions required to use the multipart upload API, see Multipart Upload and Permissions.
The following operations are related to ListMultipartUploads:
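A minimal sketch of the key-marker/upload-id-marker pagination described above, using the AWS SDK for JavaScript (v2); the bucket name is a placeholder:

```js
// Hypothetical sketch: collect every in-progress multipart upload.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

async function listAllMultipartUploads(bucket) {
  const uploads = [];
  let params = { Bucket: bucket, MaxUploads: 1000 };
  for (;;) {
    const page = await s3.listMultipartUploads(params).promise();
    uploads.push(...(page.Uploads || []));
    if (!page.IsTruncated) return uploads;
    // Resume from the markers returned in the previous response.
    params = {
      Bucket: bucket,
      MaxUploads: 1000,
      KeyMarker: page.NextKeyMarker,
      UploadIdMarker: page.NextUploadIdMarker,
    };
  }
}
```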
Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.
This API has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.
The following operations are related to ListObjects:
Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.
This action has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.
The following operations are related to ListObjects:
Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. Objects are returned sorted in an ascending order of the respective key names in the list.
To use this operation, you must have READ access to the bucket.
To use this operation in an AWS Identity and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
This section describes the latest revision of the API. We recommend that you use this revised API for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects.
To get a list of your buckets, see ListBuckets.
The following operations are related to ListObjectsV2:
Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. Objects are returned sorted in an ascending order of the respective key names in the list.
To use this operation, you must have READ access to the bucket.
To use this action in an AWS Identity and Access Management (IAM) policy, you must have permissions to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
This section describes the latest revision of this action. We recommend that you use this revised API for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API, ListObjects.
To get a list of your buckets, see ListBuckets.
The following operations are related to ListObjectsV2:
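A hedged example of the ContinuationToken pagination this doc describes, with the AWS SDK for JavaScript (v2); the bucket name is a placeholder:

```js
// Hypothetical sketch: collect every key in a bucket via ListObjectsV2.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

async function listAllKeys(bucket) {
  const keys = [];
  let token;
  do {
    const page = await s3.listObjectsV2({
      Bucket: bucket,
      ContinuationToken: token,
    }).promise();
    (page.Contents || []).forEach((obj) => keys.push(obj.Key));
    token = page.IsTruncated ? page.NextContinuationToken : undefined;
  } while (token);
  return keys;
}
```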
Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload). This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You can restrict the number of parts returned by specifying the max-parts request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with the value of true, and a NextPartNumberMarker element. In subsequent ListParts requests you can include the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value from the previous response.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload.
For information on permissions required to use the multipart upload API, see Multipart Upload API and Permissions.
The following operations are related to ListParts:
Lists the parts that have been uploaded for a specific multipart upload. This operation must include the upload ID, which you obtain by sending the initiate multipart upload request (see CreateMultipartUpload). This request returns a maximum of 1,000 uploaded parts. The default number of parts returned is 1,000 parts. You can restrict the number of parts returned by specifying the max-parts request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with the value of true, and a NextPartNumberMarker element. In subsequent ListParts requests you can include the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value from the previous response.
For more information on multipart uploads, see Uploading Objects Using Multipart Upload.
For information on permissions required to use the multipart upload API, see Multipart Upload and Permissions.
The following operations are related to ListParts:
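A minimal sketch of the part-number-marker pagination described above, assuming an upload already started with the AWS SDK for JavaScript (v2); all identifiers are placeholders:

```js
// Hypothetical sketch: list every part of one multipart upload.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

async function listAllParts(bucket, key, uploadId) {
  const parts = [];
  let marker;
  do {
    const page = await s3.listParts({
      Bucket: bucket,
      Key: key,
      UploadId: uploadId,
      PartNumberMarker: marker,
    }).promise();
    parts.push(...(page.Parts || []));
    marker = page.IsTruncated ? page.NextPartNumberMarker : undefined;
  } while (marker);
  return parts;
}
```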
Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to Amazon S3.
To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The Transfer Acceleration state of a bucket can be set to one of the following two values:
Enabled – Enables accelerated data transfers to the bucket.
Suspended – Disables accelerated data transfers to the bucket.
The GetBucketAccelerateConfiguration operation returns the transfer acceleration state of a bucket.
After setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the data transfer rates to the bucket increase.
The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").
For more information about transfer acceleration, see Transfer Acceleration.
The following operations are related to PutBucketAccelerateConfiguration:
Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to Amazon S3.
To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
The Transfer Acceleration state of a bucket can be set to one of the following two values:
Enabled – Enables accelerated data transfers to the bucket.
Suspended – Disables accelerated data transfers to the bucket.
The GetBucketAccelerateConfiguration action returns the transfer acceleration state of a bucket.
After setting the Transfer Acceleration state of a bucket to Enabled, it might take up to thirty minutes before the data transfer rates to the bucket increase.
The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").
For more information about transfer acceleration, see Transfer Acceleration.
The following operations are related to PutBucketAccelerateConfiguration:
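A hedged sketch of enabling Transfer Acceleration and reading the state back, using the AWS SDK for JavaScript (v2); the bucket name is a placeholder:

```js
// Hypothetical sketch: enable acceleration, then verify the state.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

async function enableAcceleration(bucket) {
  await s3.putBucketAccelerateConfiguration({
    Bucket: bucket,
    AccelerateConfiguration: { Status: 'Enabled' }, // or 'Suspended'
  }).promise();
  const { Status } = await s3
    .getBucketAccelerateConfiguration({ Bucket: bucket })
    .promise();
  console.log('Transfer Acceleration state:', Status);
}
```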
Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.
To use this operation, you must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has this permission and can grant it to others.
You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest capability.
To enable cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the bucket. The cors subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.
When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the bucket and uses the first CORSRule rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:
The request's Origin header must match AllowedOrigin elements.
The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method header in case of a pre-flight OPTIONS request must be one of the AllowedMethod elements.
Every header specified in the Access-Control-Request-Headers request header of a pre-flight request must match an AllowedHeader element.
For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.
Related Resources
", + "documentation": "Sets the cors
configuration for your bucket. If the configuration exists, Amazon S3 replaces it.
To use this operation, you must be allowed to perform the s3:PutBucketCORS
action. By default, the bucket owner has this permission and can grant it to others.
You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com
to access your Amazon S3 bucket at my.example.bucket.com
by using the browser's XMLHttpRequest
capability.
To enable cross-origin resource sharing (CORS) on a bucket, you add the cors
subresource to the bucket. The cors
subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.
When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors
configuration on the bucket and uses the first CORSRule
rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:
The request's Origin
header must match AllowedOrigin
elements.
The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method
header in case of a pre-flight OPTIONS
request must be one of the AllowedMethod
elements.
Every header specified in the Access-Control-Request-Headers
request header of a pre-flight request must match an AllowedHeader
element.
For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service User Guide.
Related Resources
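A minimal sketch of setting a CORS rule with the AWS SDK for JavaScript (v2); the bucket name and origin are placeholders:

```js
// Hypothetical sketch: allow browser GET/PUT requests from one origin.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

const params = {
  Bucket: 'my-bucket',
  CORSConfiguration: {
    CORSRules: [{
      AllowedOrigins: ['http://www.example.com'],
      AllowedMethods: ['GET', 'PUT'],
      AllowedHeaders: ['*'],
      MaxAgeSeconds: 3000,
    }],
  },
};

s3.putBucketCors(params).promise()
  .then(() => console.log('CORS configuration applied'))
  .catch(console.error);
```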
", "httpChecksumRequired": true }, "PutBucketEncryption": { @@ -1030,7 +1030,7 @@ "input": { "shape": "PutBucketEncryptionRequest" }, - "documentation": "This operation uses the encryption
subresource to configure default encryption and Amazon S3 Bucket Key for an existing bucket.
Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you specify default encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default encryption, see Amazon S3 default bucket encryption in the Amazon Simple Storage Service Developer Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.
This operation requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).
To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
Related Resources
", + "documentation": "This action uses the encryption
subresource to configure default encryption and Amazon S3 Bucket Key for an existing bucket.
Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you specify default encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default encryption, see Amazon S3 default bucket encryption in the Amazon Simple Storage Service User Guide. For more information about S3 Bucket Keys, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service User Guide.
This action requires AWS Signature Version 4. For more information, see Authenticating Requests (AWS Signature Version 4).
To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.
Related Resources
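A hedged sketch of configuring default SSE-KMS encryption with an S3 Bucket Key, using the AWS SDK for JavaScript (v2); the bucket name and KMS key ARN are placeholders:

```js
// Hypothetical sketch: default encryption with SSE-KMS plus a Bucket Key.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.putBucketEncryption({
  Bucket: 'my-bucket',
  ServerSideEncryptionConfiguration: {
    Rules: [{
      ApplyServerSideEncryptionByDefault: {
        SSEAlgorithm: 'aws:kms',
        KMSMasterKeyID: 'arn:aws:kms:us-east-1:111122223333:key/example-key-id',
      },
      BucketKeyEnabled: true, // reduces KMS request costs
    }],
  },
}).promise().catch(console.error);
```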
", "httpChecksumRequired": true }, "PutBucketIntelligentTieringConfiguration": { @@ -1053,7 +1053,7 @@ "input": { "shape": "PutBucketInventoryConfigurationRequest" }, - "documentation": "This implementation of the PUT
operation adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.
Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same AWS Region as the source bucket.
When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon Simple Storage Service Developer Guide.
You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
Special Errors
HTTP 400 Bad Request Error
Code: InvalidArgument
Cause: Invalid Argument
HTTP 400 Bad Request Error
Code: TooManyConfigurations
Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP 403 Forbidden Error
Code: AccessDenied
Cause: You are not the owner of the specified bucket, or you do not have the s3:PutInventoryConfiguration bucket permission to set the configuration on the bucket.
Related Resources
This implementation of the PUT
action adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.
Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same AWS Region as the source bucket.
When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon Simple Storage Service User Guide.
You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.
Special Errors
HTTP 400 Bad Request Error
Code: InvalidArgument
Cause: Invalid Argument
HTTP 400 Bad Request Error
Code: TooManyConfigurations
Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP 403 Forbidden Error
Code: AccessDenied
Cause: You are not the owner of the specified bucket, or you do not have the s3:PutInventoryConfiguration bucket permission to set the configuration on the bucket.
Related Resources
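A minimal sketch of a daily inventory configuration with the AWS SDK for JavaScript (v2); the bucket names, ARN, and configuration ID are placeholders:

```js
// Hypothetical sketch: daily CSV inventory of current object versions.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.putBucketInventoryConfiguration({
  Bucket: 'source-bucket',
  Id: 'daily-inventory',
  InventoryConfiguration: {
    Id: 'daily-inventory',
    IsEnabled: true,
    IncludedObjectVersions: 'Current',
    Schedule: { Frequency: 'Daily' },
    Destination: {
      S3BucketDestination: {
        Bucket: 'arn:aws:s3:::destination-bucket', // must allow S3 to write
        Format: 'CSV',
      },
    },
  },
}).promise().catch(console.error);
```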
For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.
By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the AWS account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.
You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.
Related Resources
GetBucketLifecycle (Deprecated)
By default, a resource owner—in this case, a bucket owner, which is the AWS account that created the bucket—can perform any of the operations. A resource owner can also grant others permission to perform the operation. For more information, see the following topics in the Amazon Simple Storage Service Developer Guide:
For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon Simple Storage Service User Guide.
By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the AWS account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.
You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.
For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.
Related Resources
GetBucketLifecycle (Deprecated)
By default, a resource owner—in this case, a bucket owner, which is the AWS account that created the bucket—can perform any of the operations. A resource owner can also grant others permission to perform the operation. For more information, see the following topics in the Amazon Simple Storage Service User Guide:
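Since the doc above recommends the newer PutBucketLifecycleConfiguration API, here is a hedged sketch using it with the AWS SDK for JavaScript (v2); the bucket name, rule ID, and prefix are placeholders:

```js
// Hypothetical sketch: transition objects under logs/ to STANDARD_IA
// after 30 days, via the recommended PutBucketLifecycleConfiguration.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.putBucketLifecycleConfiguration({
  Bucket: 'my-bucket',
  LifecycleConfiguration: {
    Rules: [{
      ID: 'archive-logs',
      Status: 'Enabled',
      Filter: { Prefix: 'logs/' },
      Transitions: [{ Days: 30, StorageClass: 'STANDARD_IA' }],
    }],
  },
}).promise().catch(console.error);
```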
Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.
Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.
By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.
<NotificationConfiguration>
</NotificationConfiguration>
This operation replaces the existing notification configuration with the configuration you include in the request body.
After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of AWS Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.
You can disable notifications by adding the empty NotificationConfiguration element.
By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with s3:PutBucketNotification permission.
The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT operation will fail, and Amazon S3 will not add the configuration to your bucket.
Responses
If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.
The following operation is related to PutBucketNotificationConfiguration:
Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.
Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.
By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.
<NotificationConfiguration>
</NotificationConfiguration>
This action replaces the existing notification configuration with the configuration you include in the request body.
After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of AWS Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.
You can disable notifications by adding the empty NotificationConfiguration element.
By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with s3:PutBucketNotification permission.
The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the configuration to your bucket.
Responses
If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.
The following action is related to PutBucketNotificationConfiguration:
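A hedged sketch of replacing the notification configuration with a single SQS destination, using the AWS SDK for JavaScript (v2); the bucket name and queue ARN are placeholders, and S3 sends a test notification to the queue before saving:

```js
// Hypothetical sketch: publish object-created events to an SQS queue.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.putBucketNotificationConfiguration({
  Bucket: 'my-bucket',
  NotificationConfiguration: {
    QueueConfigurations: [{
      QueueArn: 'arn:aws:sqs:us-east-1:111122223333:my-queue',
      Events: ['s3:ObjectCreated:*'],
    }],
  },
}).promise().catch(console.error);
```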
Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 Developer Guide.
To perform this operation, the user or role performing the operation must have the iam:PassRole permission.
Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket or buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.
A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset.
To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.
If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility.
For information about enabling versioning on a bucket, see Using Versioning.
By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.
Handling Replication of Encrypted Objects
By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS KMS.
For information on PutBucketReplication errors, see List of replication-related error codes.
The following operations are related to PutBucketReplication:
Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 Developer Guide.
To perform this operation, the user or role performing the action must have the iam:PassRole permission.
Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket or buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information.
A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset.
To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.
If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility.
For information about enabling versioning on a bucket, see Using Versioning.
By default, a resource owner, in this case the AWS account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.
Handling Replication of Encrypted Objects
By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using CMKs stored in AWS KMS.
For information on PutBucketReplication errors, see List of replication-related error codes.
The following operations are related to PutBucketReplication:
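A minimal sketch of a filtered replication rule, which per the doc above must then include DeleteMarkerReplication, Status, and Priority; role ARN, bucket names, and prefix are placeholders (AWS SDK for JavaScript v2):

```js
// Hypothetical sketch: replicate objects under docs/ to another bucket.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.putBucketReplication({
  Bucket: 'source-bucket',
  ReplicationConfiguration: {
    Role: 'arn:aws:iam::111122223333:role/replication-role',
    Rules: [{
      Priority: 1,
      Status: 'Enabled',
      Filter: { Prefix: 'docs/' },
      DeleteMarkerReplication: { Status: 'Disabled' },
      Destination: { Bucket: 'arn:aws:s3:::destination-bucket' },
    }],
  },
}).promise().catch(console.error);
```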
Sets the tags for a bucket.
Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.
Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.
To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
PutBucketTagging has the following special errors:
Error code: InvalidTagError
Description: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For information about tag restrictions, see User-Defined Tag Restrictions and AWS-Generated Cost Allocation Tag Restrictions.
Error code: MalformedXMLError
Description: The XML provided does not match the schema.
Error code: OperationAbortedError
Description: A conflicting conditional operation is currently in progress against this resource. Please try again.
Error code: InternalError
Description: The service was unable to apply the provided tag to the bucket.
The following operations are related to PutBucketTagging:
Sets the tags for a bucket.
Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.
Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.
To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
PutBucketTagging has the following special errors:
Error code: InvalidTagError
Description: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For information about tag restrictions, see User-Defined Tag Restrictions and AWS-Generated Cost Allocation Tag Restrictions.
Error code: MalformedXMLError
Description: The XML provided does not match the schema.
Error code: OperationAbortedError
Description: A conflicting conditional action is currently in progress against this resource. Please try again.
Error code: InternalError
Description: The service was unable to apply the provided tag to the bucket.
The following operations are related to PutBucketTagging:
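A hedged sketch of replacing a bucket's tag set with the AWS SDK for JavaScript (v2); note the call overwrites the whole set, and the bucket name and tags are placeholders:

```js
// Hypothetical sketch: replace the bucket's tag set for cost allocation.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.putBucketTagging({
  Bucket: 'my-bucket',
  Tagging: {
    TagSet: [
      { Key: 'project', Value: 'website' },
      { Key: 'cost-center', Value: '1234' },
    ],
  },
}).promise().catch(console.error);
```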
Sets the configuration of the website that is specified in the website subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.
This PUT operation requires the S3:PutBucketWebsite permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission.
To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide an index document name for the bucket.
WebsiteConfiguration
RedirectAllRequestsTo
HostName
Protocol
If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.
WebsiteConfiguration
IndexDocument
Suffix
ErrorDocument
Key
RoutingRules
RoutingRule
Condition
HttpErrorCodeReturnedEquals
KeyPrefixEquals
Redirect
Protocol
HostName
ReplaceKeyPrefixWith
ReplaceKeyWith
HttpRedirectCode
Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing rules, you can use object redirect. For more information, see Configuring an Object Redirect in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "Sets the configuration of the website that is specified in the website
subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.
This PUT action requires the S3:PutBucketWebsite permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite permission.
To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide an index document name for the bucket.
WebsiteConfiguration
RedirectAllRequestsTo
HostName
Protocol
If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.
WebsiteConfiguration
IndexDocument
Suffix
ErrorDocument
Key
RoutingRules
RoutingRule
Condition
HttpErrorCodeReturnedEquals
KeyPrefixEquals
Redirect
Protocol
HostName
ReplaceKeyPrefixWith
ReplaceKeyWith
HttpRedirectCode
Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing rules, you can use object redirect. For more information, see Configuring an Object Redirect in the Amazon Simple Storage Service User Guide.
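A minimal sketch mapping the element list above onto the AWS SDK for JavaScript (v2); bucket, document names, and prefixes are placeholders:

```js
// Hypothetical sketch: index/error documents plus one routing rule
// that redirects the docs/ prefix to documents/.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.putBucketWebsite({
  Bucket: 'my-bucket',
  WebsiteConfiguration: {
    IndexDocument: { Suffix: 'index.html' },
    ErrorDocument: { Key: 'error.html' },
    RoutingRules: [{
      Condition: { KeyPrefixEquals: 'docs/' },
      Redirect: { ReplaceKeyPrefixWith: 'documents/' },
    }],
  },
}).promise().catch(console.error);
```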
", "httpChecksumRequired": true }, "PutObject": { @@ -1232,7 +1232,7 @@ "shape": "PutObjectOutput" }, "documentationUrl": "http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectPUT.html", - "documentation": "Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.
Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.
To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon Simple Storage Service Developer Guide.
Server-side Encryption
You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS managed encryption keys (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption.
If you request server-side encryption using AWS Key Management Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.
Access Control List (ACL)-Specific Request Headers
You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.
Storage Class Options
By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.
Versioning
If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.
For more information about versioning, see Adding Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.
Related Resources
" + "documentation": "Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.
Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead.
To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon Simple Storage Service Developer Guide.
Server-side Encryption
You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use AWS managed encryption keys (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption.
If you request server-side encryption using AWS Key Management Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service User Guide.
Access Control List (ACL)-Specific Request Headers
You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual AWS accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.
Storage Class Options
By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 Service Developer Guide.
Versioning
If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects.
For more information about versioning, see Adding Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.
Related Resources
subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have WRITE_ACP
permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon Simple Storage Service Developer Guide.
This action is not supported by Amazon S3 on Outposts.
Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 Developer Guide.
Access Permissions
You can set access permissions using one of the following methods:
Specify a canned ACL with the x-amz-acl
request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-ac
l. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. When using these headers, you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use x-amz-acl
header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the following:
id
– if the value specified is the canonical user ID of an AWS account
uri
– if you are granting permissions to a predefined group
emailAddress
– if the value specified is the email address of an AWS account
Using email addresses to specify a grantee is only supported in the following AWS Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
For example, the following x-amz-grant-read
header grants list objects permission to the two AWS accounts identified by their email addresses.
x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
Grantee Values
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By the person's ID:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>
DisplayName is optional and ignored in the request.
By URI:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
By Email address:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress>lt;/Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
Using email addresses to specify a grantee is only supported in the following AWS Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
Versioning
The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId
subresource.
Related Resources
", + "documentation": "Uses the acl
subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have WRITE_ACP
permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon Simple Storage Service User Guide.
This action is not supported by Amazon S3 on Outposts.
Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 Developer Guide.
Access Permissions
You can set access permissions using one of the following methods:
Specify a canned ACL with the x-amz-acl
request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-ac
l. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. When using these headers, you specify explicit access permissions and grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use x-amz-acl
header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the following:
id
– if the value specified is the canonical user ID of an AWS account
uri
– if you are granting permissions to a predefined group
emailAddress
– if the value specified is the email address of an AWS account
Using email addresses to specify a grantee is only supported in the following AWS Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
For example, the following x-amz-grant-read
header grants list objects permission to the two AWS accounts identified by their email addresses.
x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
Grantee Values
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By the person's ID:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>
DisplayName is optional and ignored in the request.
By URI:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
By Email address:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress>lt;/Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
Using email addresses to specify a grantee is only supported in the following AWS Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (São Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the AWS General Reference.
Versioning
The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId
subresource.
Related Resources
", "httpChecksumRequired": true }, "PutObjectLegalHold": { @@ -1312,7 +1312,7 @@ "output": { "shape": "PutObjectTaggingOutput" }, - "documentation": "Sets the supplied tag-set to an object that already exists in a bucket.
A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.
For tagging-related restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
To use this operation, you must have permission to perform the s3:PutObjectTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
To put tags of any other version, use the versionId
query parameter. You also need permission for the s3:PutObjectVersionTagging
action.
For information about the Amazon S3 object tagging feature, see Object Tagging.
Special Errors
Code: InvalidTagError
Cause: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Object Tagging.
Code: MalformedXMLError
Cause: The XML provided does not match the schema.
Code: OperationAbortedError
Cause: A conflicting conditional operation is currently in progress against this resource. Please try again.
Code: InternalError
Cause: The service was unable to apply the provided tag to the object.
Related Resources
", + "documentation": "Sets the supplied tag-set to an object that already exists in a bucket.
A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.
For tagging-related restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
To use this operation, you must have permission to perform the s3:PutObjectTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
To put tags of any other version, use the versionId
query parameter. You also need permission for the s3:PutObjectVersionTagging
action.
For information about the Amazon S3 object tagging feature, see Object Tagging.
Special Errors
Code: InvalidTagError
Cause: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Object Tagging.
Code: MalformedXMLError
Cause: The XML provided does not match the schema.
Code: OperationAbortedError
Cause: A conflicting conditional action is currently in progress against this resource. Please try again.
Code: InternalError
Cause: The service was unable to apply the provided tag to the object.
Related Resources
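A minimal sketch of this request through the SDK follows; the bucket, key, and tag values are placeholders. Uncommenting VersionId targets a specific version, which requires the s3:PutObjectVersionTagging permission instead.

```js
// Minimal sketch: replacing an object's tag-set (up to 10 tags per object).
// Bucket, key, and tag values are placeholders.
var AWS = require('aws-sdk');
var s3 = new AWS.S3();

s3.putObjectTagging({
  Bucket: 'my-bucket',
  Key: 'report.csv',
  // VersionId: 'exampleVersionId', // tag a specific version instead of the current one
  Tagging: {
    TagSet: [
      { Key: 'project', Value: 'alpha' },
      { Key: 'classification', Value: 'internal' }
    ]
  }
}, function (err, data) {
  if (err) console.error(err); // e.g. the InvalidTag or MalformedXML errors listed above
  else console.log('Tagged version:', data.VersionId);
});
```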
", "httpChecksumRequired": true }, "PutPublicAccessBlock": { @@ -1345,7 +1345,7 @@ } ], "documentationUrl": "http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectRestore.html", - "documentation": "Restores an archived copy of an object back into Amazon S3
This action is not supported by Amazon S3 on Outposts.
This action performs the following types of requests:
select
- Perform a select query on an archived object
restore an archive
- Restore an archived object
To use this operation, you must have permissions to perform the s3:RestoreObject
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service Developer Guide.
Querying Archives with Select Requests
You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.
When making a select request, do the following:
Define an output location for the select query's output. This must be an Amazon S3 bucket in the same AWS Region as the bucket that contains the archive object that is being queried. The AWS account that initiates the job must have permissions to write to the S3 bucket. You can specify the storage class and encryption for the output objects stored in the bucket. For more information about output, see Querying Archived Objects in the Amazon Simple Storage Service Developer Guide.
For more information about the S3
structure in the request body, see the following:
Managing Access with ACLs in the Amazon Simple Storage Service Developer Guide
Protecting Data Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide
Define the SQL expression for the SELECT
type of restoration for your query in the request body's SelectParameters
structure. You can use expressions like the following examples.
The following expression returns all records from the specified object.
SELECT * FROM Object
Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.
SELECT s._1, s._2 FROM Object s WHERE s._3 > 100
If you have headers and you set the fileHeaderInfo
in the CSV
structure in the request body to USE
, you can specify headers in the query. (If you set the fileHeaderInfo
field to IGNORE
, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.
SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.
When making a select request, you can also do the following:
To expedite your queries, specify the Expedited
tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.
Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.
The following are additional important facts about the select feature:
The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted, either manually or through a lifecycle policy.
You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.
Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409
.
Restoring objects
Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers are not accessible in real time. For objects in Archive Access or Deep Archive Access tiers you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate a restore request, and then wait until a temporary copy of the object is available. To access an archived object, you must restore the object for the duration (number of days) that you specify.
To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.
When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier
element of the request body:
Expedited
- Expedited retrievals allow you to quickly access your data stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for a subset of archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
Standard
- Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.
Bulk
- Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, enabling you to retrieve large amounts, even petabytes, of data inexpensively. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for objects stored in S3 Intelligent-Tiering.
For more information about archive retrieval options and provisioned capacity for Expedited
data access, see Restoring Archived Objects in the Amazon Simple Storage Service Developer Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon Simple Storage Service Developer Guide.
To get the status of object restoration, you can send a HEAD
request. Operations return the x-amz-restore
header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service Developer Guide.
After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.
If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in the Amazon Simple Storage Service Developer Guide.
Responses
A successful operation returns either the 200 OK
or 202 Accepted
status code.
If the object is not previously restored, then Amazon S3 returns 202 Accepted
in the response.
If the object is previously restored, Amazon S3 returns 200 OK
in the response.
Special Errors
Code: RestoreAlreadyInProgress
Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: GlacierExpeditedRetrievalNotAvailable
Cause: Expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)
HTTP Status Code: 503
SOAP Fault Code Prefix: N/A
Related Resources
SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide
Restores an archived copy of an object back into Amazon S3
This action is not supported by Amazon S3 on Outposts.
This action performs the following types of requests:
select
- Perform a select query on an archived object
restore an archive
- Restore an archived object
To use this operation, you must have permissions to perform the s3:RestoreObject
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon Simple Storage Service User Guide.
Querying Archives with Select Requests
You use a select type of request to perform SQL queries on archived objects. The archived objects that are being queried by the select request must be formatted as uncompressed comma-separated values (CSV) files. You can run queries and custom analytics on your archived data without having to restore your data to a hotter Amazon S3 tier. For an overview about select requests, see Querying Archived Objects in the Amazon Simple Storage Service User Guide.
When making a select request, do the following:
Define an output location for the select query's output. This must be an Amazon S3 bucket in the same AWS Region as the bucket that contains the archive object that is being queried. The AWS account that initiates the job must have permissions to write to the S3 bucket. You can specify the storage class and encryption for the output objects stored in the bucket. For more information about output, see Querying Archived Objects in the Amazon Simple Storage Service User Guide.
For more information about the S3
structure in the request body, see the following:
Managing Access with ACLs in the Amazon Simple Storage Service User Guide
Protecting Data Using Server-Side Encryption in the Amazon Simple Storage Service User Guide
Define the SQL expression for the SELECT
type of restoration for your query in the request body's SelectParameters
structure. You can use expressions like the following examples.
The following expression returns all records from the specified object.
SELECT * FROM Object
Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.
SELECT s._1, s._2 FROM Object s WHERE s._3 > 100
If you have headers and you set the fileHeaderInfo
in the CSV
structure in the request body to USE
, you can specify headers in the query. (If you set the fileHeaderInfo
field to IGNORE
, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.
SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
For more information about using SQL with S3 Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service User Guide.
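To make the shape of such a request concrete, here is a hedged sketch of a select-type restore through this SDK; the buckets, key, prefix, and query are placeholders, not values from this changeset.

```js
// Minimal sketch: a select-type restore that queries an archived CSV object
// and writes the results to an output bucket in the same Region.
// All names here are placeholders.
var AWS = require('aws-sdk');
var s3 = new AWS.S3();

s3.restoreObject({
  Bucket: 'archive-bucket',
  Key: 'logs/2020/january.csv',
  RestoreRequest: {
    Type: 'SELECT',
    Tier: 'Standard', // or 'Expedited' to speed up the query
    SelectParameters: {
      ExpressionType: 'SQL',
      // FileHeaderInfo USE lets the query reference header column names.
      Expression: 'SELECT s.Id, s.FirstName FROM S3Object s',
      InputSerialization: { CSV: { FileHeaderInfo: 'USE' } },
      OutputSerialization: { CSV: {} }
    },
    OutputLocation: {
      S3: { BucketName: 'query-results-bucket', Prefix: 'restored/' }
    }
  }
}, function (err) {
  if (err) console.error(err);
  else console.log('Select restore accepted');
});
```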
When making a select request, you can also do the following:
To expedite your queries, specify the Expedited
tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.
Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.
The following are additional important facts about the select feature:
The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted, either manually or through a lifecycle policy.
You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests.
Amazon S3 accepts a select request even if the object has already been restored. A select request doesn’t return error response 409
.
Restoring objects
Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers are not accessible in real time. For objects in Archive Access or Deep Archive Access tiers you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate a restore request, and then wait until a temporary copy of the object is available. To access an archived object, you must restore the object for the duration (number of days) that you specify.
To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.
When restoring an archived object (or using a select request), you can specify one of the following data access tier options in the Tier
element of the request body:
Expedited
- Expedited retrievals allow you to quickly access your data stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for a subset of archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
Standard
- Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.
Bulk
- Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, enabling you to retrieve large amounts, even petabytes, of data inexpensively. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for objects stored in S3 Intelligent-Tiering.
For more information about archive retrieval options and provisioned capacity for Expedited
data access, see Restoring Archived Objects in the Amazon Simple Storage Service User Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon Simple Storage Service User Guide.
To get the status of object restoration, you can send a HEAD
request. Operations return the x-amz-restore
header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon Simple Storage Service User Guide.
After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.
If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in the Amazon Simple Storage Service User Guide.
Responses
A successful action returns either the 200 OK
or 202 Accepted
status code.
If the object is not previously restored, then Amazon S3 returns 202 Accepted
in the response.
If the object is previously restored, Amazon S3 returns 200 OK
in the response.
Special Errors
Code: RestoreAlreadyInProgress
Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: GlacierExpeditedRetrievalNotAvailable
Cause: Expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)
HTTP Status Code: 503
SOAP Fault Code Prefix: N/A
Related Resources
SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service User Guide
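The following sketch ties the pieces above together for an archive restore: it issues the restore request with a tier, handles the 409 case, and checks the x-amz-restore status with a HEAD request. The bucket and key are placeholders.

```js
// Minimal sketch: restore an archived object for 10 days via the Bulk tier,
// then check its restoration status. Bucket and key are placeholders.
var AWS = require('aws-sdk');
var s3 = new AWS.S3();
var loc = { Bucket: 'archive-bucket', Key: 'backups/db.dump' };

s3.restoreObject({
  Bucket: loc.Bucket,
  Key: loc.Key,
  // Reissuing with a new Days value updates the restoration period.
  RestoreRequest: { Days: 10, GlacierJobParameters: { Tier: 'Bulk' } }
}, function (err) {
  if (err && err.code === 'RestoreAlreadyInProgress') {
    return console.log('Restore already in progress (HTTP 409).');
  }
  if (err) return console.error(err);
  // 202 Accepted for a new restore; 200 OK if the object was already restored.
  s3.headObject(loc, function (err2, head) {
    if (err2) return console.error(err2);
    // e.g. 'ongoing-request="true"' while the restore is still running.
    console.log('x-amz-restore:', head.Restore);
  });
});
```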
This operation filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.
This action is not supported by Amazon S3 on Outposts.
For more information about Amazon S3 Select, see Selecting Content from Objects in the Amazon Simple Storage Service Developer Guide.
For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service Developer Guide.
Permissions
You must have s3:GetObject
permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon Simple Storage Service Developer Guide.
Object Data Formats
You can use Amazon S3 Select to query objects that have the following format properties:
CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.
Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.
For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon Simple Storage Service Developer Guide.
For objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide.
Working with the Response Body
Because the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding
header with chunked
as its value in the response. For more information, see Appendix: SelectObjectContent Response .
GetObject Support
The SelectObjectContent
operation does not support the following GetObject
functionality. For more information, see GetObject.
Range
: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.
GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY
storage classes. For more information about storage classes, see Storage Classes in the Amazon Simple Storage Service Developer Guide.
Special Errors
For a list of special errors for this operation, see List of SELECT Object Content Error Codes
Related Resources
" + "documentation": "This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.
This action is not supported by Amazon S3 on Outposts.
For more information about Amazon S3 Select, see Selecting Content from Objects in the Amazon Simple Storage Service User Guide.
For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon Simple Storage Service User Guide.
Permissions
You must have s3:GetObject
permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon Simple Storage Service User Guide.
Object Data Formats
You can use Amazon S3 Select to query objects that have the following format properties:
CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.
Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.
For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon Simple Storage Service User Guide.
For objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon Simple Storage Service User Guide.
Working with the Response Body
Because the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding
header with chunked
as its value in the response. For more information, see Appendix: SelectObjectContent Response .
GetObject Support
The SelectObjectContent
action does not support the following GetObject
functionality. For more information, see GetObject.
Range
: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.
GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY
storage classes. For more information about storage classes, see Storage Classes in the Amazon Simple Storage Service User Guide.
Special Errors
For a list of special errors for this operation, see List of SELECT Object Content Error Codes
Related Resources
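As a rough illustration of this operation and its chunked event-stream response in the SDK, consider the sketch below; the bucket, key, and query are placeholders.

```js
// Minimal sketch: run an S3 Select query over a CSV object and stream the
// matching records. Bucket, key, and the SQL expression are placeholders.
var AWS = require('aws-sdk');
var s3 = new AWS.S3();

s3.selectObjectContent({
  Bucket: 'my-bucket',
  Key: 'data/records.csv',
  ExpressionType: 'SQL',
  Expression: 'SELECT s._1, s._2 FROM S3Object s',
  InputSerialization: { CSV: {} },  // add CompressionType: 'GZIP' for gzipped input
  OutputSerialization: { CSV: {} }
}, function (err, data) {
  if (err) return console.error(err);
  // data.Payload is an event stream (the chunked response described above).
  data.Payload.on('data', function (event) {
    if (event.Records) process.stdout.write(event.Records.Payload.toString());
    else if (event.End) console.log('\nSelect complete.');
  });
  data.Payload.on('error', console.error);
});
```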
" }, "UploadPart": { "name": "UploadPart", @@ -1379,7 +1379,7 @@ "shape": "UploadPartOutput" }, "documentationUrl": "http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPart.html", - "documentation": "Uploads a part in a multipart upload.
In this operation, you provide part data in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.
You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request.
Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in size, except the last part. There is no size limit on the last part of your multipart upload.
To ensure that data is not corrupted when traversing the network, specify the Content-MD5
header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error.
If the upload request is signed with Signature Version 4, then AWS S3 uses the x-amz-content-sha256
header as a checksum instead of Content-MD5
. For more information, see Authenticating Requests: Using the Authorization Header (AWS Signature Version 4).
Note: After you initiate a multipart upload and upload one or more parts, you must either complete or abort the multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort the multipart upload does Amazon S3 free up the parts storage and stop charging you for it.
For more information on multipart uploads, go to Multipart Upload Overview in the Amazon Simple Storage Service Developer Guide .
For information on the permissions required to use the multipart upload API, go to Multipart Upload API and Permissions in the Amazon Simple Storage Service Developer Guide.
You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it for you when you access it. You have the option of providing your own encryption key, or you can use the AWS managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using CreateMultipartUpload. For more information, go to Using Server-Side Encryption in the Amazon Simple Storage Service Developer Guide.
Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.
If you requested server-side encryption using a customer-provided encryption key in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
Special Errors
Code: NoSuchUpload
Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Related Resources
" + "documentation": "Uploads a part in a multipart upload.
In this operation, you provide part data in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.
You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request.
Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten. Each part must be at least 5 MB in size, except the last part. There is no size limit on the last part of your multipart upload.
To ensure that data is not corrupted when traversing the network, specify the Content-MD5
header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error.
If the upload request is signed with Signature Version 4, then AWS S3 uses the x-amz-content-sha256
header as a checksum instead of Content-MD5
. For more information, see Authenticating Requests: Using the Authorization Header (AWS Signature Version 4).
Note: After you initiate a multipart upload and upload one or more parts, you must either complete or abort the multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort the multipart upload does Amazon S3 free up the parts storage and stop charging you for it.
For more information on multipart uploads, go to Multipart Upload Overview in the Amazon Simple Storage Service User Guide .
For information on the permissions required to use the multipart upload API, go to Multipart Upload and Permissions in the Amazon Simple Storage Service User Guide.
You can optionally request server-side encryption where Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it for you when you access it. You have the option of providing your own encryption key, or you can use the AWS managed encryption keys. If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using CreateMultipartUpload. For more information, go to Using Server-Side Encryption in the Amazon Simple Storage Service User Guide.
Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key, you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart request. For more information, see CreateMultipartUpload.
If you requested server-side encryption using a customer-provided encryption key in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
Special Errors
Code: NoSuchUpload
Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Related Resources
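For orientation, here is a hedged sketch of the surrounding multipart flow in this SDK: initiate, upload a single part, then complete so the parts stop accruing charges. The bucket, key, and the 5 MB placeholder body are hypothetical.

```js
// Minimal sketch of the multipart flow around UploadPart: initiate, upload
// one part, then complete. Bucket, key, and the payload are placeholders.
var AWS = require('aws-sdk');
var s3 = new AWS.S3();
var bucket = 'my-bucket', key = 'large-object.bin';

s3.createMultipartUpload({ Bucket: bucket, Key: key }, function (err, mpu) {
  if (err) return console.error(err);
  s3.uploadPart({
    Bucket: bucket,
    Key: key,
    UploadId: mpu.UploadId,              // returned by the initiate request
    PartNumber: 1,                       // 1-10,000; reusing a number overwrites that part
    Body: Buffer.alloc(5 * 1024 * 1024)  // each part except the last must be >= 5 MB
  }, function (err2, part) {
    if (err2) return console.error(err2); // e.g. NoSuchUpload (404)
    // Complete (or abort) so the uploaded parts stop accruing storage charges.
    s3.completeMultipartUpload({
      Bucket: bucket,
      Key: key,
      UploadId: mpu.UploadId,
      MultipartUpload: { Parts: [{ ETag: part.ETag, PartNumber: 1 }] }
    }, function (err3) {
      if (err3) console.error(err3);
      else console.log('Multipart upload complete');
    });
  });
});
```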
" }, "UploadPartCopy": { "name": "UploadPartCopy", @@ -1394,7 +1394,7 @@ "shape": "UploadPartCopyOutput" }, "documentationUrl": "http://docs.amazonwebservices.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html", - "documentation": "Uploads a part by copying data from an existing object as data source. You specify the data source by adding the request header x-amz-copy-source
in your request and a byte range by adding the request header x-amz-copy-source-range
in your request.
The minimum allowable part size for a multipart upload is 5 MB. For more information about multipart upload limits, go to Quick Facts in the Amazon Simple Storage Service Developer Guide.
Instead of using an existing object as part data, you might use the UploadPart operation and provide data in your request.
You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request.
For more information about using the UploadPartCopy
operation, see the following:
For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon Simple Storage Service Developer Guide.
For information about permissions required to use the multipart upload API, see Multipart Upload API and Permissions in the Amazon Simple Storage Service Developer Guide.
For information about copying objects using a single atomic operation vs. the multipart upload, see Operations on Objects in the Amazon Simple Storage Service Developer Guide.
For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.
Note the following additional considerations about the request headers x-amz-copy-source-if-match
, x-amz-copy-source-if-none-match
, x-amz-copy-source-if-unmodified-since
, and x-amz-copy-source-if-modified-since
:
Consideration 1 - If both of the x-amz-copy-source-if-match
and x-amz-copy-source-if-unmodified-since
headers are present in the request as follows:
x-amz-copy-source-if-match
condition evaluates to true
, and;
x-amz-copy-source-if-unmodified-since
condition evaluates to false
;
Amazon S3 returns 200 OK
and copies the data.
Consideration 2 - If both of the x-amz-copy-source-if-none-match
and x-amz-copy-source-if-modified-since
headers are present in the request as follows:
x-amz-copy-source-if-none-match
condition evaluates to false
, and;
x-amz-copy-source-if-modified-since
condition evaluates to true
;
Amazon S3 returns 412 Precondition Failed
response code.
Versioning
If your bucket has versioning enabled, you could have multiple versions of the same object. By default, x-amz-copy-source
identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the x-amz-copy-source
, Amazon S3 returns a 404 error, because the object does not exist. If you specify versionId in the x-amz-copy-source
and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the x-amz-copy-source
.
You can optionally specify a specific version of the source object to copy by adding the versionId
subresource as shown in the following example:
x-amz-copy-source: /bucket/object?versionId=version id
Special Errors
Code: NoSuchUpload
Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Code: InvalidRequest
Cause: The specified copy source is not supported as a byte-range copy source.
HTTP Status Code: 400 Bad Request
Related Resources
Uploads a part by copying data from an existing object as data source. You specify the data source by adding the request header x-amz-copy-source
in your request and a byte range by adding the request header x-amz-copy-source-range
in your request.
The minimum allowable part size for a multipart upload is 5 MB. For more information about multipart upload limits, go to Quick Facts in the Amazon Simple Storage Service User Guide.
Instead of using an existing object as part data, you might use the UploadPart action and provide data in your request.
You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns a unique identifier, the upload ID, that you must include in your upload part request.
For more information about using the UploadPartCopy
operation, see the following:
For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon Simple Storage Service User Guide.
For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon Simple Storage Service User Guide.
For information about copying objects using a single atomic action vs. the multipart upload, see Operations on Objects in the Amazon Simple Storage Service User Guide.
For information about using server-side encryption with customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and UploadPart.
Note the following additional considerations about the request headers x-amz-copy-source-if-match
, x-amz-copy-source-if-none-match
, x-amz-copy-source-if-unmodified-since
, and x-amz-copy-source-if-modified-since
:
Consideration 1 - If both of the x-amz-copy-source-if-match
and x-amz-copy-source-if-unmodified-since
headers are present in the request as follows:
x-amz-copy-source-if-match
condition evaluates to true
, and;
x-amz-copy-source-if-unmodified-since
condition evaluates to false
;
Amazon S3 returns 200 OK
and copies the data.
Consideration 2 - If both of the x-amz-copy-source-if-none-match
and x-amz-copy-source-if-modified-since
headers are present in the request as follows:
x-amz-copy-source-if-none-match
condition evaluates to false
, and;
x-amz-copy-source-if-modified-since
condition evaluates to true
;
Amazon S3 returns 412 Precondition Failed
response code.
Versioning
If your bucket has versioning enabled, you could have multiple versions of the same object. By default, x-amz-copy-source
identifies the current version of the object to copy. If the current version is a delete marker and you don't specify a versionId in the x-amz-copy-source
, Amazon S3 returns a 404 error, because the object does not exist. If you specify versionId in the x-amz-copy-source
and the versionId is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not allowed to specify a delete marker as a version for the x-amz-copy-source
.
You can optionally specify a specific version of the source object to copy by adding the versionId
subresource as shown in the following example:
x-amz-copy-source: /bucket/object?versionId=version id
Special Errors
Code: NoSuchUpload
Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
Code: InvalidRequest
Cause: The specified copy source is not supported as a byte-range copy source.
HTTP Status Code: 400 Bad Request
Related Resources
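A minimal sketch of this request in the SDK, assuming a multipart upload has already been initiated; the buckets, keys, upload ID, and ETag are placeholders.

```js
// Minimal sketch: copy the first 5 MB of an existing object in as part 1 of
// a multipart upload. All identifiers here are placeholders.
var AWS = require('aws-sdk');
var s3 = new AWS.S3();

s3.uploadPartCopy({
  Bucket: 'destination-bucket',
  Key: 'assembled-object',
  UploadId: 'exampleUploadId',          // from createMultipartUpload
  PartNumber: 1,
  // Append ?versionId=... to copy a specific version of the source object.
  CopySource: 'source-bucket/source-object',
  CopySourceRange: 'bytes=0-5242879',   // x-amz-copy-source-range, first 5 MB
  // CopySourceIfMatch: '"example-etag"' // conditional copy, per Consideration 1 above
}, function (err, data) {
  if (err) console.error(err);          // e.g. NoSuchUpload (404), InvalidRequest (400)
  else console.log('Copied part ETag:', data.CopyPartResult.ETag);
});
```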
The bucket name to which the upload was taking place.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name to which the upload was taking place.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -1966,7 +1966,7 @@ }, "Bucket": { "shape": "BucketName", - "documentation": "The name of the bucket that contains the newly created object.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
" + "documentation": "The name of the bucket that contains the newly created object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
" }, "Key": { "shape": "ObjectKey", @@ -2230,7 +2230,7 @@ }, "Bucket": { "shape": "BucketName", - "documentation": "The name of the destination bucket.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The name of the destination bucket.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -2398,7 +2398,7 @@ }, "BucketKeyEnabled": { "shape": "BucketKeyEnabled", - "documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
Specifying this header with a COPY operation doesn’t affect bucket-level settings for S3 Bucket Key.
", + "documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
Specifying this header with a COPY action doesn’t affect bucket-level settings for S3 Bucket Key.
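A short sketch of how this header is set on a COPY through the SDK; the buckets, key, and KMS key ID are placeholders.

```js
// Minimal sketch: enable an S3 Bucket Key for the destination object of a
// COPY with SSE-KMS. Buckets, key, and the KMS key ID are placeholders.
var AWS = require('aws-sdk');
var s3 = new AWS.S3();

s3.copyObject({
  Bucket: 'destination-bucket',
  Key: 'copied-object',
  CopySource: 'source-bucket/source-object',
  ServerSideEncryption: 'aws:kms',
  SSEKMSKeyId: '1234abcd-12ab-34cd-56ef-1234567890ab',
  BucketKeyEnabled: true // affects only this object, not the bucket-level setting
}, function (err, data) {
  if (err) console.error(err);
  else console.log('Copy complete:', data.CopyObjectResult.ETag);
});
```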
", "location": "header", "locationName": "x-amz-server-side-encryption-bucket-key-enabled" }, @@ -2626,7 +2626,7 @@ }, "Bucket": { "shape": "BucketName", - "documentation": "The name of the bucket to which the multipart upload was initiated.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The name of the bucket to which the multipart upload was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "locationName": "Bucket" }, "Key": { @@ -2695,7 +2695,7 @@ }, "Bucket": { "shape": "BucketName", - "documentation": "The name of the bucket to which to initiate the upload
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The name of the bucket to which to initiate the upload
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -2809,7 +2809,7 @@ }, "SSEKMSKeyId": { "shape": "SSEKMSKeyId", - "documentation": "Specifies the ID of the symmetric customer managed AWS KMS CMK to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 Developer Guide.
", + "documentation": "Specifies the ID of the symmetric customer managed AWS KMS CMK to use for object encryption. All GET and PUT requests for an object protected by AWS KMS will fail if not made via SSL or using SigV4. For information about configuring using any of the officially supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 Developer Guide.
", "location": "header", "locationName": "x-amz-server-side-encryption-aws-kms-key-id" }, @@ -2821,7 +2821,7 @@ }, "BucketKeyEnabled": { "shape": "BucketKeyEnabled", - "documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
Specifying this header with an object operation doesn’t affect bucket-level settings for S3 Bucket Key.
", + "documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
Specifying this header with an object action doesn’t affect bucket-level settings for S3 Bucket Key.
", "location": "header", "locationName": "x-amz-server-side-encryption-bucket-key-enabled" }, @@ -3280,7 +3280,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The bucket name of the bucket containing the object.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name of the bucket containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -3341,7 +3341,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The bucket name containing the objects from which to remove the tags.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name containing the objects from which to remove the tags.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -3379,7 +3379,7 @@ }, "Errors": { "shape": "Errors", - "documentation": "Container for a failed delete operation that describes the object that Amazon S3 attempted to delete and the error it encountered.
", + "documentation": "Container for a failed delete action that describes the object that Amazon S3 attempted to delete and the error it encountered.
", "locationName": "Error" } } @@ -3393,7 +3393,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The bucket name containing the objects to delete.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name containing the objects to delete.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -3595,7 +3595,7 @@ }, "Code": { "shape": "Code", - "documentation": "The error code is a string that uniquely identifies an error condition. It is meant to be read and understood by programs that detect and handle errors by type.
Amazon S3 error codes
Code: AccessDenied
Description: Access Denied
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: AccountProblem
Description: There is a problem with your AWS account that prevents the operation from completing successfully. Contact AWS Support for further assistance.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: AllAccessDisabled
Description: All access to this Amazon S3 resource has been disabled. Contact AWS Support for further assistance.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: AmbiguousGrantByEmailAddress
Description: The email address you provided is associated with more than one account.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: AuthorizationHeaderMalformed
Description: The authorization header you provided is invalid.
HTTP Status Code: 400 Bad Request
HTTP Status Code: N/A
Code: BadDigest
Description: The Content-MD5 you specified did not match what we received.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: BucketAlreadyExists
Description: The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: BucketAlreadyOwnedByYou
Description: The bucket you tried to create already exists, and you own it. Amazon S3 returns this error in all AWS Regions except in the North Virginia Region. For legacy compatibility, if you re-create an existing bucket that you already own in the North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access control lists (ACLs).
Code: 409 Conflict (in all Regions except the North Virginia Region)
SOAP Fault Code Prefix: Client
Code: BucketNotEmpty
Description: The bucket you tried to delete is not empty.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: CredentialsNotSupported
Description: This request does not support credentials.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: CrossLocationLoggingProhibited
Description: Cross-location logging not allowed. Buckets in one geographic location cannot log information to a bucket in another location.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: EntityTooLarge
Description: Your proposed upload exceeds the maximum allowed object size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: ExpiredToken
Description: The provided token has expired.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: IllegalVersioningConfigurationException
Description: Indicates that the versioning configuration specified in the request is invalid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: IncompleteBody
Description: You did not provide the number of bytes specified by the Content-Length HTTP header
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: IncorrectNumberOfFilesInPostRequest
Description: POST requires exactly one file upload per request.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InlineDataTooLarge
Description: Inline data exceeds the maximum allowed size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InternalError
Description: We encountered an internal error. Please try again.
HTTP Status Code: 500 Internal Server Error
SOAP Fault Code Prefix: Server
Code: InvalidAccessKeyId
Description: The AWS access key ID you provided does not exist in our records.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidAddressingHeader
Description: You must specify the Anonymous role.
HTTP Status Code: N/A
SOAP Fault Code Prefix: Client
Code: InvalidArgument
Description: Invalid Argument
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidBucketName
Description: The specified bucket is not valid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidBucketState
Description: The request is not valid with the current state of the bucket.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: InvalidDigest
Description: The Content-MD5 you specified is not valid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidEncryptionAlgorithmError
Description: The encryption request you specified is not valid. The valid value is AES256.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidLocationConstraint
Description: The specified location constraint is not valid. For more information about Regions, see How to Select a Region for Your Buckets.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidObjectState
Description: The operation is not valid for the current state of the object.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidPartOrder
Description: The list of parts was not in ascending order. Parts list must be specified in order by part number.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidPayer
Description: All access to this object has been disabled. Please contact AWS Support for further assistance.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidPolicyDocument
Description: The content of the form does not meet the conditions specified in the policy document.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidRange
Description: The requested range cannot be satisfied.
HTTP Status Code: 416 Requested Range Not Satisfiable
SOAP Fault Code Prefix: Client
Code: InvalidRequest
Description: Please use AWS4-HMAC-SHA256.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: SOAP requests must be made over an HTTPS connection.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration is not supported for buckets with non-DNS compliant names.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration is not supported for buckets with periods (.) in their names.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Accelerate endpoint only supports virtual style requests.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Accelerate is not configured on this bucket.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Accelerate is disabled on this bucket.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration is not supported on this bucket. Contact AWS Support for more information.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration cannot be enabled on this bucket. Contact AWS Support for more information.
HTTP Status Code: 400 Bad Request
Code: N/A
Code: InvalidSecurity
Description: The provided security credentials are not valid.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidSOAPRequest
Description: The SOAP request body is invalid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidStorageClass
Description: The storage class you specified is not valid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidTargetBucketForLogging
Description: The target bucket for logging does not exist, is not owned by you, or does not have the appropriate grants for the log-delivery group.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidToken
Description: The provided token is malformed or otherwise invalid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidURI
Description: Couldn't parse the specified URI.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: KeyTooLongError
Description: Your key is too long.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MalformedACLError
Description: The XML you provided was not well-formed or did not validate against our published schema.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MalformedPOSTRequest
Description: The body of your POST request is not well-formed multipart/form-data.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MalformedXML
Description: This happens when the user sends malformed XML (XML that doesn't conform to the published XSD) for the configuration. The error message is, \"The XML you provided was not well-formed or did not validate against our published schema.\"
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MaxMessageLengthExceeded
Description: Your request was too big.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MaxPostPreDataLengthExceededError
Description: Your POST request fields preceding the upload file were too large.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MetadataTooLarge
Description: Your metadata headers exceed the maximum allowed metadata size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MethodNotAllowed
Description: The specified method is not allowed against this resource.
HTTP Status Code: 405 Method Not Allowed
SOAP Fault Code Prefix: Client
Code: MissingAttachment
Description: A SOAP attachment was expected, but none were found.
HTTP Status Code: N/A
SOAP Fault Code Prefix: Client
Code: MissingContentLength
Description: You must provide the Content-Length HTTP header.
HTTP Status Code: 411 Length Required
SOAP Fault Code Prefix: Client
Code: MissingRequestBodyError
Description: This happens when the user sends an empty XML document as a request. The error message is, \"Request body is empty.\"
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MissingSecurityElement
Description: The SOAP 1.1 request is missing a security element.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MissingSecurityHeader
Description: Your request is missing a required header.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: NoLoggingStatusForKey
Description: There is no such thing as a logging status subresource for a key.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: NoSuchBucket
Description: The specified bucket does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchBucketPolicy
Description: The specified bucket does not have a bucket policy.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchKey
Description: The specified key does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchVersion
Description: Indicates that the version ID specified in the request does not match an existing version.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NotImplemented
Description: A header you provided implies functionality that is not implemented.
HTTP Status Code: 501 Not Implemented
SOAP Fault Code Prefix: Server
Code: NotSignedUp
Description: Your account is not signed up for the Amazon S3 service. You must sign up before you can use Amazon S3. You can sign up at the following URL: https://aws.amazon.com/s3
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: OperationAborted
Description: A conflicting conditional operation is currently in progress against this resource. Try again.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: PermanentRedirect
Description: The bucket you are attempting to access must be addressed using the specified endpoint. Send all future requests to this endpoint.
HTTP Status Code: 301 Moved Permanently
SOAP Fault Code Prefix: Client
Code: PreconditionFailed
Description: At least one of the preconditions you specified did not hold.
HTTP Status Code: 412 Precondition Failed
SOAP Fault Code Prefix: Client
Code: Redirect
Description: Temporary redirect.
HTTP Status Code: 307 Moved Temporarily
SOAP Fault Code Prefix: Client
Code: RestoreAlreadyInProgress
Description: Object restore is already in progress.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: RequestIsNotMultiPartContent
Description: Bucket POST must be of the enclosure-type multipart/form-data.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: RequestTimeout
Description: Your socket connection to the server was not read from or written to within the timeout period.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: RequestTimeTooSkewed
Description: The difference between the request time and the server's time is too large.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: RequestTorrentOfBucketError
Description: Requesting the torrent file of a bucket is not permitted.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: SignatureDoesNotMatch
Description: The request signature we calculated does not match the signature you provided. Check your AWS secret access key and signing method. For more information, see REST Authentication and SOAP Authentication for details.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: ServiceUnavailable
Description: Reduce your request rate.
HTTP Status Code: 503 Service Unavailable
SOAP Fault Code Prefix: Server
Code: SlowDown
Description: Reduce your request rate.
HTTP Status Code: 503 Slow Down
SOAP Fault Code Prefix: Server
Code: TemporaryRedirect
Description: You are being redirected to the bucket while DNS updates.
HTTP Status Code: 307 Moved Temporarily
SOAP Fault Code Prefix: Client
Code: TokenRefreshRequired
Description: The provided token must be refreshed.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: TooManyBuckets
Description: You have attempted to create more buckets than allowed.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: UnexpectedContent
Description: This request does not support content.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: UnresolvableGrantByEmailAddress
Description: The email address you provided does not match any account on record.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: UserKeyMustBeSpecified
Description: The bucket POST must contain the specified field name. If it is specified, check the order of the fields.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
", + "documentation": "The error code is a string that uniquely identifies an error condition. It is meant to be read and understood by programs that detect and handle errors by type.
Amazon S3 error codes
Code: AccessDenied
Description: Access Denied
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: AccountProblem
Description: There is a problem with your AWS account that prevents the action from completing successfully. Contact AWS Support for further assistance.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: AllAccessDisabled
Description: All access to this Amazon S3 resource has been disabled. Contact AWS Support for further assistance.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: AmbiguousGrantByEmailAddress
Description: The email address you provided is associated with more than one account.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: AuthorizationHeaderMalformed
Description: The authorization header you provided is invalid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: N/A
Code: BadDigest
Description: The Content-MD5 you specified did not match what we received.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: BucketAlreadyExists
Description: The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: BucketAlreadyOwnedByYou
Description: The bucket you tried to create already exists, and you own it. Amazon S3 returns this error in all AWS Regions except in the North Virginia Region. For legacy compatibility, if you re-create an existing bucket that you already own in the North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access control lists (ACLs).
HTTP Status Code: 409 Conflict (in all Regions except the North Virginia Region)
SOAP Fault Code Prefix: Client
Code: BucketNotEmpty
Description: The bucket you tried to delete is not empty.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: CredentialsNotSupported
Description: This request does not support credentials.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: CrossLocationLoggingProhibited
Description: Cross-location logging is not allowed. Buckets in one geographic location cannot log information to a bucket in another location.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: EntityTooLarge
Description: Your proposed upload exceeds the maximum allowed object size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: ExpiredToken
Description: The provided token has expired.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: IllegalVersioningConfigurationException
Description: Indicates that the versioning configuration specified in the request is invalid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: IncompleteBody
Description: You did not provide the number of bytes specified by the Content-Length HTTP header.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: IncorrectNumberOfFilesInPostRequest
Description: POST requires exactly one file upload per request.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InlineDataTooLarge
Description: Inline data exceeds the maximum allowed size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InternalError
Description: We encountered an internal error. Please try again.
HTTP Status Code: 500 Internal Server Error
SOAP Fault Code Prefix: Server
Code: InvalidAccessKeyId
Description: The AWS access key ID you provided does not exist in our records.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidAddressingHeader
Description: You must specify the Anonymous role.
HTTP Status Code: N/A
SOAP Fault Code Prefix: Client
Code: InvalidArgument
Description: Invalid Argument
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidBucketName
Description: The specified bucket is not valid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidBucketState
Description: The request is not valid with the current state of the bucket.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: InvalidDigest
Description: The Content-MD5 you specified is not valid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidEncryptionAlgorithmError
Description: The encryption request you specified is not valid. The valid value is AES256.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidLocationConstraint
Description: The specified location constraint is not valid. For more information about Regions, see How to Select a Region for Your Buckets.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidObjectState
Description: The action is not valid for the current state of the object.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidPartOrder
Description: The list of parts was not in ascending order. Parts list must be specified in order by part number.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidPayer
Description: All access to this object has been disabled. Please contact AWS Support for further assistance.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidPolicyDocument
Description: The content of the form does not meet the conditions specified in the policy document.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidRange
Description: The requested range cannot be satisfied.
HTTP Status Code: 416 Requested Range Not Satisfiable
SOAP Fault Code Prefix: Client
Code: InvalidRequest
Description: Please use AWS4-HMAC-SHA256.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: N/A
Code: InvalidRequest
Description: SOAP requests must be made over an HTTPS connection.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration is not supported for buckets with non-DNS compliant names.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration is not supported for buckets with periods (.) in their names.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Accelerate endpoint only supports virtual style requests.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Accelerate is not configured on this bucket.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Accelerate is disabled on this bucket.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration is not supported on this bucket. Contact AWS Support for more information.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: N/A
Code: InvalidRequest
Description: Amazon S3 Transfer Acceleration cannot be enabled on this bucket. Contact AWS Support for more information.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: N/A
Code: InvalidSecurity
Description: The provided security credentials are not valid.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: InvalidSOAPRequest
Description: The SOAP request body is invalid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidStorageClass
Description: The storage class you specified is not valid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidTargetBucketForLogging
Description: The target bucket for logging does not exist, is not owned by you, or does not have the appropriate grants for the log-delivery group.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidToken
Description: The provided token is malformed or otherwise invalid.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: InvalidURI
Description: Couldn't parse the specified URI.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: KeyTooLongError
Description: Your key is too long.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MalformedACLError
Description: The XML you provided was not well-formed or did not validate against our published schema.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MalformedPOSTRequest
Description: The body of your POST request is not well-formed multipart/form-data.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MalformedXML
Description: This happens when the user sends malformed XML (XML that doesn't conform to the published XSD) for the configuration. The error message is, \"The XML you provided was not well-formed or did not validate against our published schema.\"
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MaxMessageLengthExceeded
Description: Your request was too big.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MaxPostPreDataLengthExceededError
Description: Your POST request fields preceding the upload file were too large.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MetadataTooLarge
Description: Your metadata headers exceed the maximum allowed metadata size.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MethodNotAllowed
Description: The specified method is not allowed against this resource.
HTTP Status Code: 405 Method Not Allowed
SOAP Fault Code Prefix: Client
Code: MissingAttachment
Description: A SOAP attachment was expected, but none were found.
HTTP Status Code: N/A
SOAP Fault Code Prefix: Client
Code: MissingContentLength
Description: You must provide the Content-Length HTTP header.
HTTP Status Code: 411 Length Required
SOAP Fault Code Prefix: Client
Code: MissingRequestBodyError
Description: This happens when the user sends an empty XML document as a request. The error message is, \"Request body is empty.\"
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MissingSecurityElement
Description: The SOAP 1.1 request is missing a security element.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: MissingSecurityHeader
Description: Your request is missing a required header.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: NoLoggingStatusForKey
Description: There is no such thing as a logging status subresource for a key.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: NoSuchBucket
Description: The specified bucket does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchBucketPolicy
Description: The specified bucket does not have a bucket policy.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchKey
Description: The specified key does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NoSuchVersion
Description: Indicates that the version ID specified in the request does not match an existing version.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
Code: NotImplemented
Description: A header you provided implies functionality that is not implemented.
HTTP Status Code: 501 Not Implemented
SOAP Fault Code Prefix: Server
Code: NotSignedUp
Description: Your account is not signed up for the Amazon S3 service. You must sign up before you can use Amazon S3. You can sign up at the following URL: https://aws.amazon.com/s3
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: OperationAborted
Description: A conflicting conditional action is currently in progress against this resource. Try again.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: PermanentRedirect
Description: The bucket you are attempting to access must be addressed using the specified endpoint. Send all future requests to this endpoint.
HTTP Status Code: 301 Moved Permanently
SOAP Fault Code Prefix: Client
Code: PreconditionFailed
Description: At least one of the preconditions you specified did not hold.
HTTP Status Code: 412 Precondition Failed
SOAP Fault Code Prefix: Client
Code: Redirect
Description: Temporary redirect.
HTTP Status Code: 307 Moved Temporarily
SOAP Fault Code Prefix: Client
Code: RestoreAlreadyInProgress
Description: Object restore is already in progress.
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: RequestIsNotMultiPartContent
Description: Bucket POST must be of the enclosure-type multipart/form-data.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: RequestTimeout
Description: Your socket connection to the server was not read from or written to within the timeout period.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: RequestTimeTooSkewed
Description: The difference between the request time and the server's time is too large.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: RequestTorrentOfBucketError
Description: Requesting the torrent file of a bucket is not permitted.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: SignatureDoesNotMatch
Description: The request signature we calculated does not match the signature you provided. Check your AWS secret access key and signing method. For more information, see REST Authentication and SOAP Authentication.
HTTP Status Code: 403 Forbidden
SOAP Fault Code Prefix: Client
Code: ServiceUnavailable
Description: Reduce your request rate.
HTTP Status Code: 503 Service Unavailable
SOAP Fault Code Prefix: Server
Code: SlowDown
Description: Reduce your request rate.
HTTP Status Code: 503 Slow Down
SOAP Fault Code Prefix: Server
Code: TemporaryRedirect
Description: You are being redirected to the bucket while DNS updates.
HTTP Status Code: 307 Moved Temporarily
SOAP Fault Code Prefix: Client
Code: TokenRefreshRequired
Description: The provided token must be refreshed.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: TooManyBuckets
Description: You have attempted to create more buckets than allowed.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: UnexpectedContent
Description: This request does not support content.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: UnresolvableGrantByEmailAddress
Description: The email address you provided does not match any account on record.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
Code: UserKeyMustBeSpecified
Description: The bucket POST must contain the specified field name. If it is specified, check the order of the fields.
HTTP Status Code: 400 Bad Request
SOAP Fault Code Prefix: Client
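Since these codes are, as the text above says, meant to be read by programs, this SDK surfaces them on the rejected error object as err.code alongside err.statusCode. A sketch of branching on a few of the codes tabulated above — bucket and key are placeholders:

```javascript
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

s3.getObject({ Bucket: 'my-example-bucket', Key: 'missing.txt' }) // placeholders
  .promise()
  .catch(err => {
    // err.code carries the S3 error code; err.statusCode the HTTP status.
    switch (err.code) {
      case 'NoSuchKey':    // 404: the specified key does not exist
      case 'NoSuchBucket': // 404: the specified bucket does not exist
        console.warn('not found:', err.message);
        break;
      case 'SlowDown':     // 503: reduce the request rate, retry with backoff
        console.warn('throttled, retry later');
        break;
      default:
        throw err; // unexpected; let it propagate
    }
  });
```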
", - "documentation": "Optional configuration to replicate existing source bucket objects. For more information, see Replicating Existing Objects in the Amazon S3 Developer Guide.
" + "documentation": "Optional configuration to replicate existing source bucket objects. For more information, see Replicating Existing Objects in the Amazon S3 Developer Guide.
" }, "ExistingObjectReplicationStatus": { "type": "string", @@ -4439,7 +4439,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The bucket name that contains the object for which to get the ACL information.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name that contains the object for which to get the ACL information.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -4487,7 +4487,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The bucket name containing the object whose Legal Hold status you want to retrieve.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name containing the object whose Legal Hold status you want to retrieve.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -4534,7 +4534,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The bucket whose Object Lock configuration you want to retrieve.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket whose Object Lock configuration you want to retrieve.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -4574,7 +4574,7 @@ }, "Restore": { "shape": "Restore", - "documentation": "Provides information about object restoration operation and expiration time of the restored object copy.
", + "documentation": "Provides information about object restoration action and expiration time of the restored object copy.
", "location": "header", "locationName": "x-amz-restore" }, @@ -4751,7 +4751,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The bucket name containing the object.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -4889,7 +4889,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The bucket name containing the object whose retention settings you want to retrieve.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name containing the object whose retention settings you want to retrieve.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -4945,7 +4945,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The bucket name containing the object for which to get the tagging information.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name containing the object for which to get the tagging information.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -5144,7 +5144,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The bucket name.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -5349,7 +5349,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The name of the bucket containing the object.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The name of the bucket containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -6245,7 +6245,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The name of the bucket to which the multipart upload was initiated.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The name of the bucket to which the multipart upload was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -6382,7 +6382,7 @@ }, "MaxKeys": { "shape": "MaxKeys", - "documentation": "Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more. If additional keys satisfy the search criteria, but were not returned because max-keys was exceeded, the response contains <isTruncated>true</isTruncated>. To return the additional keys, see key-marker and version-id-marker.
", + "documentation": "Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more. If additional keys satisfy the search criteria, but were not returned because max-keys was exceeded, the response contains <isTruncated>true</isTruncated>. To return the additional keys, see key-marker and version-id-marker.
", "location": "querystring", "locationName": "max-keys" }, @@ -6459,7 +6459,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The name of the bucket containing the objects.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The name of the bucket containing the objects.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -6482,7 +6482,7 @@ }, "MaxKeys": { "shape": "MaxKeys", - "documentation": "Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.
", + "documentation": "Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.
", "location": "querystring", "locationName": "max-keys" }, @@ -6519,7 +6519,7 @@ }, "Name": { "shape": "BucketName", - "documentation": "The bucket name.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
" + "documentation": "The bucket name.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
" }, "Prefix": { "shape": "Prefix", @@ -6531,7 +6531,7 @@ }, "MaxKeys": { "shape": "MaxKeys", - "documentation": "Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.
" + "documentation": "Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.
" }, "CommonPrefixes": { "shape": "CommonPrefixList", @@ -6567,7 +6567,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "Bucket name to list.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "Bucket name to list.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -6585,7 +6585,7 @@ }, "MaxKeys": { "shape": "MaxKeys", - "documentation": "Sets the maximum number of keys returned in the response. By default the API returns up to 1,000 key names. The response might contain fewer keys but will never contain more.
", + "documentation": "Sets the maximum number of keys returned in the response. By default the action returns up to 1,000 key names. The response might contain fewer keys but will never contain more.
", "location": "querystring", "locationName": "max-keys" }, @@ -6704,7 +6704,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The name of the bucket to which the parts are being uploaded.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The name of the bucket to which the parts are being uploaded.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -7531,7 +7531,7 @@ "locationName": "RestrictPublicBuckets" } }, - "documentation": "The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.
" + "documentation": "The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon Simple Storage Service User Guide.
" }, "PutBucketAccelerateConfigurationRequest": { "type": "structure", @@ -7686,7 +7686,7 @@ }, "CORSConfiguration": { "shape": "CORSConfiguration", - "documentation": "Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing in the Amazon Simple Storage Service User Guide.
", "locationName": "CORSConfiguration", "xmlNamespace": { "uri": "http://s3.amazonaws.com/doc/2006-03-01/" @@ -8310,7 +8310,7 @@ }, "Bucket": { "shape": "BucketName", - "documentation": "The bucket name that contains the object to which you want to attach the ACL.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name that contains the object to which you want to attach the ACL.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -8352,7 +8352,7 @@ }, "Key": { "shape": "ObjectKey", - "documentation": "Key for which the PUT operation was initiated.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "Key for which the PUT action was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Key" }, @@ -8395,7 +8395,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The bucket name containing the object that you want to place a Legal Hold on.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name containing the object that you want to place a Legal Hold on.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
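A sketch of placing the Legal Hold documented here; the names are placeholders, and the bucket must have Object Lock enabled:

```javascript
var AWS = require('aws-sdk');
var s3 = new AWS.S3();

s3.putObjectLegalHold({
  Bucket: 'my-locked-bucket',
  Key: 'records/2021-03.csv',
  LegalHold: { Status: 'ON' }   // 'OFF' releases the hold
}, function (err) {
  if (err) console.error(err);
});
```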
", "location": "uri", "locationName": "Bucket" }, @@ -8579,7 +8579,7 @@ }, "Bucket": { "shape": "BucketName", - "documentation": "The bucket name to which the PUT operation was initiated.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name to which the PUT action was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -8657,7 +8657,7 @@ }, "Key": { "shape": "ObjectKey", - "documentation": "Object key for which the PUT operation was initiated.
", + "documentation": "Object key for which the PUT action was initiated.
", "location": "uri", "locationName": "Key" }, @@ -8717,7 +8717,7 @@ }, "BucketKeyEnabled": { "shape": "BucketKeyEnabled", - "documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
Specifying this header with a PUT operation doesn’t affect bucket-level settings for S3 Bucket Key.
", + "documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true
causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS.
Specifying this header with a PUT action doesn’t affect bucket-level settings for S3 Bucket Key.
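A sketch of setting this header on a single PUT; the bucket, key, and KMS key ID are placeholders:

```javascript
var AWS = require('aws-sdk');
var s3 = new AWS.S3();

s3.putObject({
  Bucket: 'my-bucket',
  Key: 'data/report.json',
  Body: JSON.stringify({ ok: true }),
  ServerSideEncryption: 'aws:kms',
  SSEKMSKeyId: '1234abcd-12ab-34cd-56ef-1234567890ab',
  BucketKeyEnabled: true   // per-request; bucket-level settings are unchanged
}, function (err, data) {
  if (err) console.error(err);
  else console.log(data.BucketKeyEnabled);   // echoed back in the response
});
```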
", "location": "header", "locationName": "x-amz-server-side-encryption-bucket-key-enabled" }, @@ -8778,7 +8778,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The bucket name that contains the object you want to apply this Object Retention configuration to.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name that contains the object you want to apply this Object Retention configuration to.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -8809,7 +8809,7 @@ }, "BypassGovernanceRetention": { "shape": "BypassGovernanceRetention", - "documentation": "Indicates whether this operation should bypass Governance-mode restrictions.
", + "documentation": "Indicates whether this action should bypass Governance-mode restrictions.
", "location": "header", "locationName": "x-amz-bypass-governance-retention" }, @@ -8849,7 +8849,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The bucket name containing the object.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -9333,13 +9333,13 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The bucket name containing the object to restore.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name containing the object to restore.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action with S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, "Key": { "shape": "ObjectKey", - "documentation": "Object key for which the operation was initiated.
", + "documentation": "Object key for which the action was initiated.
", "location": "uri", "locationName": "Key" }, @@ -9431,7 +9431,7 @@ "documentation": "Container for redirect information. You can redirect requests to another host, to another page, or with another protocol. In the event of an error, you can specify a different error code to return.
" } }, - "documentation": "Specifies the redirect behavior and when a redirect is applied. For more information about routing rules, see Configuring advanced conditional redirects in the Amazon Simple Storage Service Developer Guide.
" + "documentation": "Specifies the redirect behavior and when a redirect is applied. For more information about routing rules, see Configuring advanced conditional redirects in the Amazon Simple Storage Service User Guide.
" }, "RoutingRules": { "type": "list", @@ -9465,7 +9465,7 @@ }, "Transition": { "shape": "Transition", - "documentation": "Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.
" + "documentation": "Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service User Guide.
" }, "NoncurrentVersionTransition": { "shape": "NoncurrentVersionTransition" @@ -9775,7 +9775,7 @@ }, "BucketKeyEnabled": { "shape": "BucketKeyEnabled", - "documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the BucketKeyEnabled
element to true
causes Amazon S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled.
For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service Developer Guide.
" + "documentation": "Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the BucketKeyEnabled
element to true
causes Amazon S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled.
For more information, see Amazon S3 Bucket Keys in the Amazon Simple Storage Service User Guide.
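A sketch of enabling this element as the bucket default (the bucket name and KMS key ID are placeholders; existing objects are untouched):

```javascript
var AWS = require('aws-sdk');
var s3 = new AWS.S3();

s3.putBucketEncryption({
  Bucket: 'my-bucket',
  ServerSideEncryptionConfiguration: {
    Rules: [{
      ApplyServerSideEncryptionByDefault: {
        SSEAlgorithm: 'aws:kms',
        KMSMasterKeyID: '1234abcd-12ab-34cd-56ef-1234567890ab'
      },
      BucketKeyEnabled: true
    }]
  }
}, function (err) {
  if (err) console.error(err);
});
```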
" } }, "documentation": "Specifies the default server-side encryption configuration.
" @@ -10047,7 +10047,7 @@ }, "Events": { "shape": "EventList", - "documentation": "The Amazon S3 bucket event about which to send notifications. For more information, see Supported Event Types in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The Amazon S3 bucket event about which to send notifications. For more information, see Supported Event Types in the Amazon Simple Storage Service User Guide.
", "locationName": "Event" }, "Filter": { @@ -10102,7 +10102,7 @@ "documentation": "The storage class to which you want the object to transition.
" } }, - "documentation": "Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.
" + "documentation": "Specifies when an object transitions to a specified storage class. For more information about Amazon S3 lifecycle configuration rules, see Transitioning Objects Using Amazon S3 Lifecycle in the Amazon Simple Storage Service User Guide.
" }, "TransitionList": { "type": "list", @@ -10198,7 +10198,7 @@ "members": { "Bucket": { "shape": "BucketName", - "documentation": "The bucket name.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The bucket name.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, @@ -10373,7 +10373,7 @@ }, "Bucket": { "shape": "BucketName", - "documentation": "The name of the bucket to which the multipart upload was initiated.
When using this API with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this operation with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this API with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this operation using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", + "documentation": "The name of the bucket to which the multipart upload was initiated.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the AWS SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using Access Points in the Amazon Simple Storage Service Developer Guide.
When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the AWS SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
", "location": "uri", "locationName": "Bucket" }, diff --git a/apis/s3control-2018-08-20.normal.json b/apis/s3control-2018-08-20.normal.json index 0720053536..fe95428c0a 100644 --- a/apis/s3control-2018-08-20.normal.json +++ b/apis/s3control-2018-08-20.normal.json @@ -27,7 +27,7 @@ "output": { "shape": "CreateAccessPointResult" }, - "documentation": "Creates an access point and associates it with the specified bucket. For more information, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.
Using this action with Amazon S3 on Outposts
This action:
Requires a virtual private cloud (VPC) configuration as S3 on Outposts only supports VPC style access points.
Does not support ACL on S3 on Outposts buckets.
Does not support Public Access on S3 on Outposts buckets.
Does not support object lock for S3 on Outposts buckets.
For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide .
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to CreateAccessPoint
:
Creates an access point and associates it with the specified bucket. For more information, see Managing Data Access with Amazon S3 Access Points in the Amazon Simple Storage Service User Guide.
Using this action with Amazon S3 on Outposts
This action:
Requires a virtual private cloud (VPC) configuration as S3 on Outposts only supports VPC style access points.
Does not support ACL on S3 on Outposts buckets.
Does not support Public Access on S3 on Outposts buckets.
Does not support object lock for S3 on Outposts buckets.
For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to CreateAccessPoint
:
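A sketch of the call documented above, creating a VPC-restricted access point with the S3 Control client (the account ID, names, and VPC ID are placeholders; VpcConfiguration is required only for S3 on Outposts):

```javascript
var AWS = require('aws-sdk');
var s3control = new AWS.S3Control({ region: 'us-west-2' });

s3control.createAccessPoint({
  AccountId: '123456789012',
  Name: 'my-access-point',
  Bucket: 'my-bucket',
  VpcConfiguration: { VpcId: 'vpc-0a1b2c3d' }   // restricts access to this VPC
}, function (err, data) {
  if (err) console.error(err);
  else console.log(data.AccessPointArn);
});
```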
This API operation creates an Amazon S3 on Outposts bucket. To create an S3 bucket, see Create Bucket in the Amazon Simple Storage Service API.
Creates a new Outposts bucket. By creating the bucket, you become the bucket owner. To create an Outposts bucket, you must have S3 on Outposts. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.
Not every string is an acceptable bucket name. For information on bucket naming restrictions, see Working with Amazon S3 Buckets.
S3 on Outposts buckets do not support
ACLs. Instead, configure access point policies to manage access to buckets.
Public access.
Object Lock
Bucket Location constraint
For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id
in your API request, see the Examples section.
The following actions are related to CreateBucket
for Amazon S3 on Outposts:
This action creates an Amazon S3 on Outposts bucket. To create an S3 bucket, see Create Bucket in the Amazon Simple Storage Service API.
Creates a new Outposts bucket. By creating the bucket, you become the bucket owner. To create an Outposts bucket, you must have S3 on Outposts. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.
Not every string is an acceptable bucket name. For information on bucket naming restrictions, see Working with Amazon S3 Buckets.
S3 on Outposts buckets support:
Tags
LifecycleConfigurations for deleting expired objects
For a list of Amazon S3 features not supported by Amazon S3 on Outposts, see Unsupported Amazon S3 features.
For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id
in your API request, see the Examples section.
The following actions are related to CreateBucket
for Amazon S3 on Outposts:
You can use S3 Batch Operations to perform large-scale batch operations on Amazon S3 objects. Batch Operations can run a single operation on lists of Amazon S3 objects that you specify. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
This operation creates a S3 Batch Operations job.
Related actions include:
", + "documentation": "You can use S3 Batch Operations to perform large-scale batch actions on Amazon S3 objects. Batch Operations can run a single action on lists of Amazon S3 objects that you specify. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.
This action creates an S3 Batch Operations job.
Related actions include:
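A sketch of the job-creation call documented above, tagging every object listed in a CSV manifest (the account ID, ARNs, ETag, and role are placeholders):

```javascript
var AWS = require('aws-sdk');
var s3control = new AWS.S3Control({ region: 'us-west-2' });

s3control.createJob({
  AccountId: '123456789012',
  ConfirmationRequired: false,
  Priority: 10,
  RoleArn: 'arn:aws:iam::123456789012:role/batch-ops-role',
  Operation: {
    S3PutObjectTagging: { TagSet: [{ Key: 'archived', Value: 'true' }] }
  },
  Manifest: {
    Spec: { Format: 'S3BatchOperations_CSV_20180820', Fields: ['Bucket', 'Key'] },
    Location: {
      ObjectArn: 'arn:aws:s3:::my-manifests/objects.csv',
      ETag: '60e1a3ac1d72cc6ba368b79a4e2b85b7'   // ETag of the manifest object
    }
  },
  Report: { Enabled: false }
}, function (err, data) {
  if (err) console.error(err);
  else console.log(data.JobId);
});
```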
", "endpoint": { "hostPrefix": "{AccountId}." } @@ -127,7 +127,7 @@ "input": { "shape": "DeleteBucketRequest" }, - "documentation": "This API operation deletes an Amazon S3 on Outposts bucket. To delete an S3 bucket, see DeleteBucket in the Amazon Simple Storage Service API.
Deletes the Amazon S3 on Outposts bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
Related Resources
", + "documentation": "This action deletes an Amazon S3 on Outposts bucket. To delete an S3 bucket, see DeleteBucket in the Amazon Simple Storage Service API.
Deletes the Amazon S3 on Outposts bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
Related Resources
", "endpoint": { "hostPrefix": "{AccountId}." } @@ -141,7 +141,7 @@ "input": { "shape": "DeleteBucketLifecycleConfigurationRequest" }, - "documentation": "This API action deletes an Amazon S3 on Outposts bucket's lifecycle configuration. To delete an S3 bucket's lifecycle configuration, see DeleteBucketLifecycle in the Amazon Simple Storage Service API.
Deletes the lifecycle configuration from the specified Outposts bucket. Amazon S3 on Outposts removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 on Outposts no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.
To use this operation, you must have permission to perform the s3-outposts:DeleteLifecycleConfiguration
action. By default, the bucket owner has this permission and the Outposts bucket owner can grant this permission to others.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
For more information about object expiration, see Elements to Describe Lifecycle Actions.
Related actions include:
", + "documentation": "This action deletes an Amazon S3 on Outposts bucket's lifecycle configuration. To delete an S3 bucket's lifecycle configuration, see DeleteBucketLifecycle in the Amazon Simple Storage Service API.
Deletes the lifecycle configuration from the specified Outposts bucket. Amazon S3 on Outposts removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 on Outposts no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3-outposts:DeleteLifecycleConfiguration
action. By default, the bucket owner has this permission and the Outposts bucket owner can grant this permission to others.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
For more information about object expiration, see Elements to Describe Lifecycle Actions.
Related actions include:
", "endpoint": { "hostPrefix": "{AccountId}." } @@ -155,7 +155,7 @@ "input": { "shape": "DeleteBucketPolicyRequest" }, - "documentation": "This API operation deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, see DeleteBucketPolicy in the Amazon Simple Storage Service API.
This implementation of the DELETE operation uses the policy subresource to delete the policy of a specified Amazon S3 on Outposts bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:DeleteBucketPolicy
permissions on the specified Outposts bucket and belong to the bucket owner's account to use this operation. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.
If you don't have DeleteBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to DeleteBucketPolicy
:
This action deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, see DeleteBucketPolicy in the Amazon Simple Storage Service API.
This implementation of the DELETE action uses the policy subresource to delete the policy of a specified Amazon S3 on Outposts bucket. If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:DeleteBucketPolicy
permissions on the specified Outposts bucket and belong to the bucket owner's account to use this action. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service User Guide.
If you don't have DeleteBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to DeleteBucketPolicy
:
This operation deletes an Amazon S3 on Outposts bucket's tags. To delete an S3 bucket tags, see DeleteBucketTagging in the Amazon Simple Storage Service API.
Deletes the tags from the Outposts bucket. For more information, see Using Amazon S3 on Outposts in Amazon Simple Storage Service Developer Guide.
To use this operation, you must have permission to perform the PutBucketTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to DeleteBucketTagging
:
This action deletes an Amazon S3 on Outposts bucket's tags. To delete an S3 bucket's tags, see DeleteBucketTagging in the Amazon Simple Storage Service API.
Deletes the tags from the Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the PutBucketTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to DeleteBucketTagging
:
Removes the entire tag set from the specified S3 Batch Operations job. To use this operation, you must have permission to perform the s3:DeleteJobTagging
action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.
Related actions include:
", + "documentation": "Removes the entire tag set from the specified S3 Batch Operations job. To use this operation, you must have permission to perform the s3:DeleteJobTagging
action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service User Guide.
Related actions include:
", "endpoint": { "hostPrefix": "{AccountId}." } @@ -226,7 +226,7 @@ "input": { "shape": "DeleteStorageLensConfigurationRequest" }, - "documentation": "Deletes the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
To use this action, you must have permission to perform the s3:DeleteStorageLensConfiguration
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
Deletes the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3:DeleteStorageLensConfiguration
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
Deletes the Amazon S3 Storage Lens configuration tags. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
To use this action, you must have permission to perform the s3:DeleteStorageLensConfigurationTagging
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
Deletes the Amazon S3 Storage Lens configuration tags. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3:DeleteStorageLensConfigurationTagging
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
Retrieves the configuration parameters and status for a Batch Operations job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
Related actions include:
", + "documentation": "Retrieves the configuration parameters and status for a Batch Operations job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.
Related actions include:
", "endpoint": { "hostPrefix": "{AccountId}." } @@ -342,7 +342,7 @@ "output": { "shape": "GetBucketResult" }, - "documentation": "Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:GetBucket
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation. Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket.
If you don't have s3-outposts:GetBucket
permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied
error.
The following actions are related to GetBucket
for Amazon S3 on Outposts:
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.
If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the s3-outposts:GetBucket
permissions on the specified bucket and belong to the bucket owner's account in order to use this action. Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket.
If you don't have s3-outposts:GetBucket
permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied
error.
The following actions are related to GetBucket
for Amazon S3 on Outposts:
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
This operation gets an Amazon S3 on Outposts bucket's lifecycle configuration. To get an S3 bucket's lifecycle configuration, see GetBucketLifecycleConfiguration in the Amazon Simple Storage Service API.
Returns the lifecycle configuration information set on the Outposts bucket. For more information, see Using Amazon S3 on Outposts and for information about lifecycle configuration, see Object Lifecycle Management in Amazon Simple Storage Service Developer Guide.
To use this operation, you must have permission to perform the s3-outposts:GetLifecycleConfiguration
action. The Outposts bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
GetBucketLifecycleConfiguration
has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following actions are related to GetBucketLifecycleConfiguration
:
This action gets an Amazon S3 on Outposts bucket's lifecycle configuration. To get an S3 bucket's lifecycle configuration, see GetBucketLifecycleConfiguration in the Amazon Simple Storage Service API.
Returns the lifecycle configuration information set on the Outposts bucket. For more information, see Using Amazon S3 on Outposts and for information about lifecycle configuration, see Object Lifecycle Management in Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3-outposts:GetLifecycleConfiguration
action. The Outposts bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
GetBucketLifecycleConfiguration
has the following special error:
Error code: NoSuchLifecycleConfiguration
Description: The lifecycle configuration does not exist.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following actions are related to GetBucketLifecycleConfiguration
:
This action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for an S3 bucket, see GetBucketPolicy in the Amazon Simple Storage Service API.
Returns the policy of a specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket. If you don't have s3-outposts:GetBucketPolicy
permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to GetBucketPolicy
:
This action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for an S3 bucket, see GetBucketPolicy in the Amazon Simple Storage Service API.
Returns the policy of a specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.
If you are using an identity other than the root user of the AWS account that owns the bucket, the calling identity must have the GetBucketPolicy
permissions on the specified bucket and belong to the bucket owner's account in order to use this action.
Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket. If you don't have s3-outposts:GetBucketPolicy
permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to GetBucketPolicy
:
This operation gets an Amazon S3 on Outposts bucket's tags. To get an S3 bucket tags, see GetBucketTagging in the Amazon Simple Storage Service API.
Returns the tag set associated with the Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
To use this operation, you must have permission to perform the GetBucketTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
GetBucketTagging has the following special error:
Error code: NoSuchTagSetError
Description: There is no tag set associated with the bucket.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to GetBucketTagging:
This action gets an Amazon S3 on Outposts bucket's tags. To get an S3 bucket's tags, see GetBucketTagging in the Amazon Simple Storage Service API.
Returns the tag set associated with the Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the GetBucketTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
GetBucketTagging has the following special error:
Error code: NoSuchTagSetError
Description: There is no tag set associated with the bucket.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to GetBucketTagging:
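A hedged sketch of the same call from the v2 JavaScript SDK, handling the NoSuchTagSetError case described above; all identifiers are placeholders.

```javascript
var AWS = require('aws-sdk');
var s3control = new AWS.S3Control({region: 'us-west-2'});

s3control.getBucketTagging({
  AccountId: '111122223333', // placeholder
  Bucket: 'arn:aws:s3-outposts:us-west-2:111122223333:outpost/op-01ac5d28a6a232904/bucket/example-outposts-bucket'
}, function(err, data) {
  if (err && err.code === 'NoSuchTagSetError') console.log('no tag set on this bucket');
  else if (err) console.log(err, err.stack);
  else data.TagSet.forEach(function(tag) { console.log(tag.Key + '=' + tag.Value); });
});
```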
Returns the tags on an S3 Batch Operations job. To use this operation, you must have permission to perform the s3:GetJobTagging
action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.
Related actions include:
", + "documentation": "Returns the tags on an S3 Batch Operations job. To use this operation, you must have permission to perform the s3:GetJobTagging
action. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service User Guide.
Related actions include:
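For illustration only, reading a Batch Operations job's tags with the v2 SDK; the job ID is a placeholder of the kind returned earlier by createJob.

```javascript
var AWS = require('aws-sdk');
var s3control = new AWS.S3Control({region: 'us-west-2'});

s3control.getJobTagging({
  AccountId: '111122223333',                    // placeholder
  JobId: '00e123a4-c0d8-41f4-a0eb-b7904f45659e' // placeholder from createJob
}).promise()
  .then(function(data) { console.log(data.Tags); }) // [{Key: ..., Value: ...}, ...]
  .catch(function(err) { console.log(err, err.stack); });
```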
", "endpoint": { "hostPrefix": "{AccountId}." } @@ -460,7 +460,7 @@ "output": { "shape": "GetStorageLensConfigurationResult" }, - "documentation": "Gets the Amazon S3 Storage Lens configuration. For more information, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
To use this action, you must have permission to perform the s3:GetStorageLensConfiguration
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
Gets the Amazon S3 Storage Lens configuration. For more information, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3:GetStorageLensConfiguration
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
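A minimal v2 SDK sketch, assuming a Storage Lens configuration was previously created under the placeholder ConfigId below.

```javascript
var AWS = require('aws-sdk');
var s3control = new AWS.S3Control({region: 'us-west-2'});

s3control.getStorageLensConfiguration({
  AccountId: '111122223333',         // placeholder
  ConfigId: 'my-storage-lens-config' // placeholder; chosen at creation time
}, function(err, data) {
  if (err) console.log(err, err.stack); // requires s3:GetStorageLensConfiguration
  else     console.log(data.StorageLensConfiguration);
});
```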
Gets the tags of Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
To use this action, you must have permission to perform the s3:GetStorageLensConfigurationTagging
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
Gets the tags of Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3:GetStorageLensConfigurationTagging
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
Lists current S3 Batch Operations jobs and jobs that have ended within the last 30 days for the AWS account making the request. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
Related actions include:
", + "documentation": "Lists current S3 Batch Operations jobs and jobs that have ended within the last 30 days for the AWS account making the request. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.
Related actions include:
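Sketch of paging through jobs with the v2 SDK; the status filter and page size are illustrative choices, not requirements.

```javascript
var AWS = require('aws-sdk');
var s3control = new AWS.S3Control({region: 'us-west-2'});

function listPage(token) {
  s3control.listJobs({
    AccountId: '111122223333', // placeholder
    JobStatuses: ['Active', 'Complete', 'Failed'],
    MaxResults: 100,
    NextToken: token
  }, function(err, data) {
    if (err) return console.log(err, err.stack);
    data.Jobs.forEach(function(job) { console.log(job.JobId, job.Status); });
    if (data.NextToken) listPage(data.NextToken); // fetch the next page, if any
  });
}
listPage();
```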
", "endpoint": { "hostPrefix": "{AccountId}." } @@ -539,7 +539,7 @@ "output": { "shape": "ListRegionalBucketsResult" }, - "documentation": "Returns a list of all Outposts buckets in an Outpost that are owned by the authenticated sender of the request. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id
in your request, see the Examples section.
Returns a list of all Outposts buckets in an Outpost that are owned by the authenticated sender of the request. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.
For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id
in your request, see the Examples section.
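Hedged v2 SDK sketch; OutpostId carries the x-amz-outpost-id value mentioned above and is a placeholder here.

```javascript
var AWS = require('aws-sdk');
var s3control = new AWS.S3Control({region: 'us-west-2'});

s3control.listRegionalBuckets({
  AccountId: '111122223333',        // placeholder
  OutpostId: 'op-01ac5d28a6a232904' // placeholder outpost
}, function(err, data) {
  if (err) console.log(err, err.stack);
  else     data.RegionalBucketList.forEach(function(b) {
    console.log(b.Bucket, b.CreationDate);
  });
});
```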
Gets a list of Amazon S3 Storage Lens configurations. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
To use this action, you must have permission to perform the s3:ListStorageLensConfigurations
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
Gets a list of Amazon S3 Storage Lens configurations. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3:ListStorageLensConfigurations
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
This action puts a bucket policy on an Amazon S3 on Outposts bucket. To put a policy on an S3 bucket, see PutBucketPolicy in the Amazon Simple Storage Service API.
Applies an Amazon S3 bucket policy to an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
If you are using an identity other than the root user of the AWS account that owns the Outposts bucket, the calling identity must have the PutBucketPolicy
permissions on the specified Outposts bucket and belong to the bucket owner's account in order to use this operation.
If you don't have PutBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this operation, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to PutBucketPolicy:
This action puts a bucket policy on an Amazon S3 on Outposts bucket. To put a policy on an S3 bucket, see PutBucketPolicy in the Amazon Simple Storage Service API.
Applies an Amazon S3 bucket policy to an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.
If you are using an identity other than the root user of the AWS account that owns the Outposts bucket, the calling identity must have the PutBucketPolicy
permissions on the specified Outposts bucket and belong to the bucket owner's account in order to use this action.
If you don't have PutBucketPolicy
permissions, Amazon S3 returns a 403 Access Denied
error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed
error.
As a security precaution, the root user of the AWS account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.
For more information about bucket policies, see Using Bucket Policies and User Policies.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to PutBucketPolicy:
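An illustrative sketch with the v2 SDK; the deny-insecure-transport statement and all identifiers are made-up examples, not part of this diff.

```javascript
var AWS = require('aws-sdk');
var s3control = new AWS.S3Control({region: 'us-west-2'});

var bucketArn = 'arn:aws:s3-outposts:us-west-2:111122223333:outpost/op-01ac5d28a6a232904/bucket/example-outposts-bucket';
var policy = {
  Version: '2012-10-17',
  Statement: [{
    Effect: 'Deny',
    Principal: '*',
    Action: 's3-outposts:PutObject',
    Resource: bucketArn + '/object/*',
    Condition: {Bool: {'aws:SecureTransport': 'false'}}
  }]
};

s3control.putBucketPolicy({
  AccountId: '111122223333', // placeholder
  Bucket: bucketArn,
  Policy: JSON.stringify(policy)
}, function(err) {
  if (err) console.log(err, err.stack); // 403 without permissions; 405 from the wrong account
  else     console.log('policy applied');
});
```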
This action puts tags on an Amazon S3 on Outposts bucket. To put tags on an S3 bucket, see PutBucketTagging in the Amazon Simple Storage Service API.
Sets the tags for an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service Developer Guide.
Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.
Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.
To use this operation, you must have permissions to perform the s3-outposts:PutBucketTagging
action. The Outposts bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
PutBucketTagging has the following special errors:
Error code: InvalidTagError
Description: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For information about tag restrictions, see User-Defined Tag Restrictions and AWS-Generated Cost Allocation Tag Restrictions.
Error code: MalformedXMLError
Description: The XML provided does not match the schema.
Error code: OperationAbortedError
Description: A conflicting conditional operation is currently in progress against this resource. Try again.
Error code: InternalError
Description: The service was unable to apply the provided tag to the bucket.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to PutBucketTagging:
This action puts tags on an Amazon S3 on Outposts bucket. To put tags on an S3 bucket, see PutBucketTagging in the Amazon Simple Storage Service API.
Sets the tags for an Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon Simple Storage Service User Guide.
Use tags to organize your AWS bill to reflect your own cost structure. To do this, sign up to get your AWS account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging.
Within a bucket, if you add a tag that has the same key as an existing tag, the new value overwrites the old value. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.
To use this action, you must have permissions to perform the s3-outposts:PutBucketTagging
action. The Outposts bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
PutBucketTagging has the following special errors:
Error code: InvalidTagError
Description: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For information about tag restrictions, see User-Defined Tag Restrictions and AWS-Generated Cost Allocation Tag Restrictions.
Error code: MalformedXMLError
Description: The XML provided does not match the schema.
Error code: OperationAbortedError
Description: A conflicting conditional action is currently in progress against this resource. Try again.
Error code: InternalError
Description: The service was unable to apply the provided tag to the bucket.
All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id
to be passed with the request and an S3 on Outposts endpoint hostname prefix instead of s3-control
. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id
derived using the access point ARN, see the Examples section.
The following actions are related to PutBucketTagging:
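For illustration, replacing the bucket tag set from the v2 SDK (this overwrites any existing tag with the same key, as noted above); all values are placeholders.

```javascript
var AWS = require('aws-sdk');
var s3control = new AWS.S3Control({region: 'us-west-2'});

s3control.putBucketTagging({
  AccountId: '111122223333', // placeholder
  Bucket: 'arn:aws:s3-outposts:us-west-2:111122223333:outpost/op-01ac5d28a6a232904/bucket/example-outposts-bucket',
  Tagging: {
    TagSet: [
      {Key: 'project', Value: 'photo-archive'},
      {Key: 'cost-center', Value: '4150'}
    ]
  }
}, function(err) {
  if (err) console.log(err, err.stack); // e.g. InvalidTagError, MalformedXMLError
  else     console.log('bucket tags applied');
});
```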
Sets the supplied tag-set on an S3 Batch Operations job.
A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving the existing tag set using GetJobTagging, modifying that tag set, and using this action to replace the tag set with the one you modified. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service Developer Guide.
If you send this request with an empty tag set, Amazon S3 deletes the existing tag set on the Batch Operations job. If you use this method, you are charged for a Tier 1 Request (PUT). For more information, see Amazon S3 pricing.
To delete the existing tags on your Batch Operations job, a DeleteJobTagging request is preferred because it achieves the same result without incurring charges.
A few things to consider about using tags:
Amazon S3 limits the maximum number of tags to 50 tags per job.
You can associate up to 50 tags with a job as long as they have unique tag keys.
A tag key can be up to 128 Unicode characters in length, and tag values can be up to 256 Unicode characters in length.
Tag keys and values are case sensitive.
For tagging-related restrictions related to characters and encodings, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide.
To use this operation, you must have permission to perform the s3:PutJobTagging action.
Related actions include:
", + "documentation": "Sets the supplied tag-set on an S3 Batch Operations job.
A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving the existing tag set using GetJobTagging, modifying that tag set, and using this action to replace the tag set with the one you modified. For more information, see Controlling access and labeling jobs using tags in the Amazon Simple Storage Service User Guide.
If you send this request with an empty tag set, Amazon S3 deletes the existing tag set on the Batch Operations job. If you use this method, you are charged for a Tier 1 Request (PUT). For more information, see Amazon S3 pricing.
To delete the existing tags on your Batch Operations job, a DeleteJobTagging request is preferred because it achieves the same result without incurring charges.
A few things to consider about using tags:
Amazon S3 limits the maximum number of tags to 50 tags per job.
You can associate up to 50 tags with a job as long as they have unique tag keys.
A tag key can be up to 128 Unicode characters in length, and tag values can be up to 256 Unicode characters in length.
Tag keys and values are case sensitive.
For tagging-related restrictions related to characters and encodings, see User-Defined Tag Restrictions in the AWS Billing and Cost Management User Guide.
To use this action, you must have permission to perform the s3:PutJobTagging action.
Related actions include:
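A sketch of replacing a job's tag set with the v2 SDK (to clear tags, prefer deleteJobTagging and avoid the Tier 1 PUT charge noted above); identifiers are placeholders.

```javascript
var AWS = require('aws-sdk');
var s3control = new AWS.S3Control({region: 'us-west-2'});

s3control.putJobTagging({
  AccountId: '111122223333',                     // placeholder
  JobId: '00e123a4-c0d8-41f4-a0eb-b7904f45659e', // placeholder
  Tags: [ // replaces the entire tag set; max 50 tags with unique keys
    {Key: 'department', Value: 'finance'},
    {Key: 'stage', Value: 'pre-production'}
  ]
}).promise()
  .then(function() { console.log('job tags replaced'); })
  .catch(function(err) { console.log(err, err.stack); });
```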
", "endpoint": { "hostPrefix": "{AccountId}." } @@ -690,7 +690,7 @@ "uri": "http://awss3control.amazonaws.com/doc/2018-08-20/" } }, - "documentation": "Puts an Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
To use this action, you must have permission to perform the s3:PutStorageLensConfiguration
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
Puts an Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3:PutStorageLensConfiguration
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
Puts or replaces tags on an existing Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
To use this action, you must have permission to perform the s3:PutStorageLensConfigurationTagging
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service Developer Guide.
Puts or replaces tags on an existing Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
To use this action, you must have permission to perform the s3:PutStorageLensConfigurationTagging
action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon Simple Storage Service User Guide.
Updates an existing S3 Batch Operations job's priority. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
Related actions include:
", + "documentation": "Updates an existing S3 Batch Operations job's priority. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.
Related actions include:
", "endpoint": { "hostPrefix": "{AccountId}." } @@ -776,7 +776,7 @@ "shape": "InternalServiceException" } ], - "documentation": "Updates the status for the specified job. Use this operation to confirm that you want to run a job or to cancel an existing job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
Related actions include:
", + "documentation": "Updates the status for the specified job. Use this action to confirm that you want to run a job or to cancel an existing job. For more information, see S3 Batch Operations in the Amazon Simple Storage Service User Guide.
Related actions include:
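Illustrative v2 SDK sketch: confirming a job that is awaiting confirmation (pass 'Cancelled' instead to stop it); the job ID is a placeholder.

```javascript
var AWS = require('aws-sdk');
var s3control = new AWS.S3Control({region: 'us-west-2'});

s3control.updateJobStatus({
  AccountId: '111122223333',                     // placeholder
  JobId: '00e123a4-c0d8-41f4-a0eb-b7904f45659e', // placeholder
  RequestedJobStatus: 'Ready',                   // or 'Cancelled'
  StatusUpdateReason: 'Manifest reviewed'        // optional note
}, function(err, data) {
  if (err) console.log(err, err.stack);
  else     console.log(data.JobId, data.Status);
});
```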
", "endpoint": { "hostPrefix": "{AccountId}." } @@ -1108,7 +1108,7 @@ }, "Operation": { "shape": "JobOperation", - "documentation": "The operation that you want this job to perform on every object listed in the manifest. For more information about the available operations, see Operations in the Amazon Simple Storage Service Developer Guide.
" + "documentation": "The action that you want this job to perform on every object listed in the manifest. For more information about the available actions, see Operations in the Amazon Simple Storage Service User Guide.
" }, "Report": { "shape": "JobReport", @@ -1134,7 +1134,7 @@ }, "RoleArn": { "shape": "IAMRoleArn", - "documentation": "The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role that Batch Operations will use to run this job's operation on every object in the manifest.
" + "documentation": "The Amazon Resource Name (ARN) for the AWS Identity and Access Management (IAM) role that Batch Operations will use to run this job's action on every object in the manifest.
" }, "Tags": { "shape": "S3TagSet", @@ -2169,7 +2169,7 @@ "box": true } }, - "documentation": "The operation that you want this job to perform on every object listed in the manifest. For more information about the available operations, see Operations in the Amazon Simple Storage Service Developer Guide.
" + "documentation": "The operation that you want this job to perform on every object listed in the manifest. For more information about the available operations, see Operations in the Amazon Simple Storage Service User Guide.
" }, "JobPriority": { "type": "integer", @@ -2710,7 +2710,7 @@ "locationName": "IsPublic" } }, - "documentation": "Indicates whether this access point policy is public. For more information about how Amazon S3 evaluates policies to determine whether they are public, see The Meaning of \"Public\" in the Amazon Simple Storage Service Developer Guide.
" + "documentation": "Indicates whether this access point policy is public. For more information about how Amazon S3 evaluates policies to determine whether they are public, see The Meaning of \"Public\" in the Amazon Simple Storage Service User Guide.
" }, "Prefix": { "type": "string" @@ -2793,7 +2793,7 @@ }, "Policy": { "shape": "Policy", - "documentation": "The policy that you want to apply to the specified access point. For more information about access point policies, see Managing data access with Amazon S3 Access Points in the Amazon Simple Storage Service Developer Guide.
" + "documentation": "The policy that you want to apply to the specified access point. For more information about access point policies, see Managing data access with Amazon S3 Access Points in the Amazon Simple Storage Service User Guide.
" } } }, @@ -3464,7 +3464,7 @@ "documentation": "The Object Lock retention mode to be applied to all objects in the Batch Operations job.
" } }, - "documentation": "Contains the S3 Object Lock retention mode to be applied to all objects in the S3 Batch Operations job. If you don't provide Mode
and RetainUntilDate
values in your request, the retention is removed from your objects. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
Contains the S3 Object Lock retention mode to be applied to all objects in the S3 Batch Operations job. If you don't provide Mode
and RetainUntilDate
values in your request, the retention is removed from your objects. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service User Guide.
Contains the Object Lock legal hold status to be applied to all objects in the Batch Operations job.
" } }, - "documentation": "Contains the configuration for an S3 Object Lock legal hold operation that an S3 Batch Operations job passes every object to the underlying PutObjectLegalHold
API. For more information, see Using S3 Object Lock legal hold with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
Contains the configuration for an S3 Object Lock legal hold operation that an S3 Batch Operations job passes every object to the underlying PutObjectLegalHold
API. For more information, see Using S3 Object Lock legal hold with S3 Batch Operations in the Amazon Simple Storage Service User Guide.
Contains the Object Lock retention mode to be applied to all objects in the Batch Operations job. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
" + "documentation": "Contains the Object Lock retention mode to be applied to all objects in the Batch Operations job. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service User Guide.
" } }, - "documentation": "Contains the configuration parameters for the Object Lock retention action for an S3 Batch Operations job. Batch Operations passes every object to the underlying PutObjectRetention
API. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service Developer Guide.
Contains the configuration parameters for the Object Lock retention action for an S3 Batch Operations job. Batch Operations passes every object to the underlying PutObjectRetention
API. For more information, see Using S3 Object Lock retention with S3 Batch Operations in the Amazon Simple Storage Service User Guide.
The storage class to which you want the object to transition.
" } }, - "documentation": "Specifies when an object transitions to a specified storage class. For more information about Amazon S3 Lifecycle configuration rules, see Transitioning objects using Amazon S3 Lifecycle in the Amazon Simple Storage Service Developer Guide.
" + "documentation": "Specifies when an object transitions to a specified storage class. For more information about Amazon S3 Lifecycle configuration rules, see Transitioning objects using Amazon S3 Lifecycle in the Amazon Simple Storage Service User Guide.
" }, "TransitionList": { "type": "list", @@ -3939,5 +3939,5 @@ "min": 1 } }, - "documentation": "AWS S3 Control provides access to Amazon S3 control plane operations.
" + "documentation": "AWS S3 Control provides access to Amazon S3 control plane actions.
" } \ No newline at end of file diff --git a/clients/autoscaling.d.ts b/clients/autoscaling.d.ts index f1afbd4338..4eaae2ad82 100644 --- a/clients/autoscaling.d.ts +++ b/clients/autoscaling.d.ts @@ -867,7 +867,7 @@ declare namespace AutoScaling { */ LaunchTemplate?: LaunchTemplateSpecification; /** - * An embedded object that specifies a mixed instances policy. The required parameters must be specified. If optional parameters are unspecified, their default values are used. The policy includes parameters that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacities, but also the parameters that specify the instance configuration information—the launch template and instance types. The policy can also include a weight for each instance type and different launch templates for individual instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide. + * An embedded object that specifies a mixed instances policy. The required properties must be specified. If optional properties are unspecified, their default values are used. The policy includes properties that not only define the distribution of On-Demand Instances and Spot Instances, the maximum price to pay for Spot Instances, and how the Auto Scaling group allocates instance types to fulfill On-Demand and Spot capacities, but also the properties that specify the instance configuration information—the launch template and instance types. The policy can also include a weight for each instance type and different launch templates for individual instance types. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide. */ MixedInstancesPolicy?: MixedInstancesPolicy; /** @@ -1678,7 +1678,7 @@ declare namespace AutoScaling { export type Instances = Instance[]; export interface InstancesDistribution { /** - * Indicates how to allocate instance types to fulfill On-Demand capacity. The only valid value is prioritized, which is also the default value. This strategy uses the order of instance types in the overrides to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling groups launches the remaining capacity using the second priority instance type, and so on. + * Indicates how to allocate instance types to fulfill On-Demand capacity. The only valid value is prioritized, which is also the default value. This strategy uses the order of instance types in the LaunchTemplateOverrides to define the launch priority of each instance type. The first instance type in the array is prioritized higher than the last. If all your On-Demand capacity cannot be fulfilled using your highest priority instance, then the Auto Scaling groups launches the remaining capacity using the second priority instance type, and so on. */ OnDemandAllocationStrategy?: XmlString; /** @@ -1690,7 +1690,7 @@ declare namespace AutoScaling { */ OnDemandPercentageAboveBaseCapacity?: OnDemandPercentageAboveBaseCapacity; /** - * Indicates how to allocate instances across Spot Instance pools. 
If the allocation strategy is capacity-optimized (recommended), the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity. If the allocation strategy is lowest-price, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. Defaults to lowest-price if not specified. + * Indicates how to allocate instances across Spot Instance pools. If the allocation strategy is lowest-price, the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools that you specify. Defaults to lowest-price if not specified. If the allocation strategy is capacity-optimized (recommended), the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity. Alternatively, you can use capacity-optimized-prioritized and set the order of instance types in the list of launch template overrides from highest to lowest priority (from first to last in the list). Amazon EC2 Auto Scaling honors the instance type priorities on a best-effort basis but optimizes for capacity first. */ SpotAllocationStrategy?: XmlString; /** @@ -1824,7 +1824,7 @@ declare namespace AutoScaling { */ LaunchTemplateSpecification?: LaunchTemplateSpecification; /** - * Any parameters that you specify override the same parameters in the launch template. If not provided, Amazon EC2 Auto Scaling uses the instance type specified in the launch template when it launches an instance. + * Any properties that you specify override the same properties in the launch template. If not provided, Amazon EC2 Auto Scaling uses the instance type specified in the launch template when it launches an instance. */ Overrides?: Overrides; } @@ -2002,7 +2002,7 @@ declare namespace AutoScaling { */ LaunchTemplate?: LaunchTemplate; /** - * Specifies the instances distribution. If not provided, the value for each parameter in InstancesDistribution uses a default value. + * Specifies the instances distribution. If not provided, the value for each property in InstancesDistribution uses a default value. */ InstancesDistribution?: InstancesDistribution; } @@ -2612,7 +2612,7 @@ declare namespace AutoScaling { */ LaunchTemplate?: LaunchTemplateSpecification; /** - * An embedded object that specifies a mixed instances policy. When you make changes to an existing policy, all optional parameters are left unchanged if not specified. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide. + * An embedded object that specifies a mixed instances policy. When you make changes to an existing policy, all optional properties are left unchanged if not specified. For more information, see Auto Scaling groups with multiple instance types and purchase options in the Amazon EC2 Auto Scaling User Guide. */ MixedInstancesPolicy?: MixedInstancesPolicy; /** diff --git a/clients/emr.d.ts b/clients/emr.d.ts index 3b5305d87d..4eaaafcb7e 100644 --- a/clients/emr.d.ts +++ b/clients/emr.d.ts @@ -1555,7 +1555,7 @@ declare namespace EMR { */ InstanceGroupType?: InstanceGroupType; /** - * The bid price for each EC2 Spot Instance type as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%. 
+ * If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice to set the amount equal to the On-Demand price, or specify an amount in USD. */ BidPrice?: String; /** @@ -1621,7 +1621,7 @@ declare namespace EMR { */ InstanceRole: InstanceRoleType; /** - * The bid price for each EC2 Spot Instance as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%. + * If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice to set the amount equal to the On-Demand price, or specify an amount in USD. */ BidPrice?: XmlStringMaxLen256; /** @@ -1664,7 +1664,7 @@ declare namespace EMR { */ InstanceRole: InstanceRoleType; /** - * The bid price for each EC2 Spot Instance as defined by InstanceType. Expressed in USD. If neither BidPrice nor BidPriceAsPercentageOfOnDemandPrice is provided, BidPriceAsPercentageOfOnDemandPrice defaults to 100%. + * If specified, indicates that the instance group uses Spot Instances. This is the maximum price you are willing to pay for Spot Instances. Specify OnDemandPrice to set the amount equal to the On-Demand price, or specify an amount in USD. */ BidPrice?: XmlStringMaxLen256; /** @@ -2409,7 +2409,7 @@ declare namespace EMR { */ ClusterId: String; /** - * The number of steps that can be executed concurrently. You can specify a maximum of 256 steps. + * The number of steps that can be executed concurrently. You can specify a minimum of 1 step and a maximum of 256 steps. */ StepConcurrencyLevel?: Integer; } @@ -2523,12 +2523,28 @@ declare namespace EMR { EndTime?: _Date; } export type NotebookExecutionSummaryList = NotebookExecutionSummary[]; + export interface OnDemandCapacityReservationOptions { + /** + * Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity. If you specify use-capacity-reservations-first, the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy (lowest-price) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy (lowest-price). If you do not specify a value, the fleet fulfils the On-Demand capacity according to the chosen On-Demand allocation strategy. + */ + UsageStrategy?: OnDemandCapacityReservationUsageStrategy; + /** + * Indicates the instance's Capacity Reservation preferences. Possible preferences include: open - The instance can run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone). none - The instance avoids running in a Capacity Reservation even if one is available. The instance runs as an On-Demand Instance. 
+ */ + CapacityReservationPreference?: OnDemandCapacityReservationPreference; + } + export type OnDemandCapacityReservationPreference = "open"|"none"|string; + export type OnDemandCapacityReservationUsageStrategy = "use-capacity-reservations-first"|string; export type OnDemandProvisioningAllocationStrategy = "lowest-price"|string; export interface OnDemandProvisioningSpecification { /** - * Specifies the strategy to use in launching On-Demand Instance fleets. Currently, the only option is lowest-price (the default), which launches the lowest price first. + * Specifies the strategy to use in launching On-Demand instance fleets. Currently, the only option is lowest-price (the default), which launches the lowest price first. */ AllocationStrategy: OnDemandProvisioningAllocationStrategy; + /** + * The launch specification for On-Demand instances in the instance fleet, which determines the allocation strategy. + */ + CapacityReservationOptions?: OnDemandCapacityReservationOptions; } export type OptionalArnType = string; export interface PlacementGroupConfig { diff --git a/clients/kinesisvideoarchivedmedia.d.ts b/clients/kinesisvideoarchivedmedia.d.ts index 7c205d87b4..86b252aa98 100644 --- a/clients/kinesisvideoarchivedmedia.d.ts +++ b/clients/kinesisvideoarchivedmedia.d.ts @@ -21,27 +21,27 @@ declare class KinesisVideoArchivedMedia extends Service { */ getClip(callback?: (err: AWSError, data: KinesisVideoArchivedMedia.Types.GetClipOutput) => void): Request<KinesisVideoArchivedMedia.Types.GetClipOutput, AWSError>;
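To show where the new EMR OnDemandCapacityReservationOptions fit, here is a hedged runJobFlow sketch with the v2 SDK; the cluster name, roles, release label, and fleet sizes are placeholder choices, not prescribed values.

```javascript
var AWS = require('aws-sdk');
var emr = new AWS.EMR({region: 'us-west-2'});

emr.runJobFlow({
  Name: 'capacity-reservation-demo', // placeholder
  ReleaseLabel: 'emr-5.32.0',        // placeholder release
  ServiceRole: 'EMR_DefaultRole',
  JobFlowRole: 'EMR_EC2_DefaultRole',
  Instances: {
    InstanceFleets: [{
      InstanceFleetType: 'MASTER',
      TargetOnDemandCapacity: 1,
      InstanceTypeConfigs: [{InstanceType: 'm5.xlarge'}],
      LaunchSpecifications: {
        OnDemandSpecification: {
          AllocationStrategy: 'lowest-price',
          // New in this release: draw down unused Capacity Reservations first.
          CapacityReservationOptions: {
            UsageStrategy: 'use-capacity-reservations-first',
            CapacityReservationPreference: 'open'
          }
        }
      }
    }]
  }
}, function(err, data) {
  if (err) console.log(err, err.stack);
  else     console.log('cluster started:', data.JobFlowId);
});
```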