From 9e29187dc08c2e9bf8f66166841ca9c75e0624ab Mon Sep 17 00:00:00 2001 From: AWS SDK for Go v2 automation user Date: Fri, 16 Feb 2024 19:14:57 +0000 Subject: [PATCH] Regenerated Clients --- .../359ed06a88084e41b66034d577e49a80.json | 8 + .../4ed09e1fbf5b45768a7777c1aa4787d9.json | 8 + .../67228f2bc98944a7bae5fa2de18ec0fd.json | 8 + .../691024699e854bed9dd3425df53e5038.json | 8 + .../7743aba7b2bd45aa811605a1b3a39d5c.json | 8 + .../8c1afd5068bc4128b30aa12a58cbbf17.json | 8 + .../internal/endpoints/endpoints.go | 60 ++ .../internal/endpoints/endpoints.go | 3 + .../api_op_CompleteAttachmentUpload.go | 8 +- .../api_op_GetTranscript.go | 13 +- .../connectparticipant/api_op_SendEvent.go | 15 +- service/connectparticipant/types/errors.go | 3 +- service/ec2/internal/endpoints/endpoints.go | 18 + service/efs/internal/endpoints/endpoints.go | 18 + .../emr/api_op_SetUnhealthyNodeReplacement.go | 150 +++ service/emr/deserializers.go | 103 ++ service/emr/generated.json | 1 + service/emr/serializers.go | 79 ++ service/emr/types/types.go | 12 + service/emr/validators.go | 42 + .../firehose/api_op_CreateDeliveryStream.go | 48 +- .../firehose/api_op_DeleteDeliveryStream.go | 38 +- .../firehose/api_op_DescribeDeliveryStream.go | 2 +- service/firehose/api_op_PutRecord.go | 70 +- service/firehose/api_op_PutRecordBatch.go | 63 +- .../api_op_StartDeliveryStreamEncryption.go | 37 +- .../api_op_StopDeliveryStreamEncryption.go | 20 +- service/firehose/api_op_UpdateDestination.go | 18 +- service/firehose/deserializers.go | 18 + service/firehose/doc.go | 8 +- service/firehose/serializers.go | 20 + service/firehose/types/enums.go | 4 + service/firehose/types/errors.go | 8 +- service/firehose/types/types.go | 900 +++++++++--------- .../lambda/api_op_CreateEventSourceMapping.go | 9 +- service/lambda/api_op_CreateFunction.go | 10 +- .../lambda/api_op_GetFunctionConfiguration.go | 6 +- service/lambda/api_op_Invoke.go | 3 +- service/lambda/api_op_InvokeAsync.go | 4 +- .../lambda/api_op_ListEventSourceMappings.go | 4 +- service/lambda/api_op_ListLayerVersions.go | 2 +- service/lambda/api_op_ListLayers.go | 2 +- service/lambda/api_op_PublishVersion.go | 6 +- .../lambda/api_op_UpdateEventSourceMapping.go | 5 +- service/lambda/api_op_UpdateFunctionCode.go | 6 +- .../api_op_UpdateFunctionConfiguration.go | 10 +- service/lambda/types/types.go | 27 +- service/osis/internal/endpoints/endpoints.go | 6 + service/rds/api_op_CreateDBCluster.go | 3 +- .../api_op_CreateDBClusterParameterGroup.go | 2 +- service/rds/types/types.go | 1 + .../internal/endpoints/endpoints.go | 75 ++ .../sns/api_op_CreatePlatformApplication.go | 10 +- service/sns/api_op_GetEndpointAttributes.go | 2 +- ...api_op_GetPlatformApplicationAttributes.go | 4 + ...api_op_SetPlatformApplicationAttributes.go | 9 +- 56 files changed, 1375 insertions(+), 658 deletions(-) create mode 100644 .changelog/359ed06a88084e41b66034d577e49a80.json create mode 100644 .changelog/4ed09e1fbf5b45768a7777c1aa4787d9.json create mode 100644 .changelog/67228f2bc98944a7bae5fa2de18ec0fd.json create mode 100644 .changelog/691024699e854bed9dd3425df53e5038.json create mode 100644 .changelog/7743aba7b2bd45aa811605a1b3a39d5c.json create mode 100644 .changelog/8c1afd5068bc4128b30aa12a58cbbf17.json create mode 100644 service/emr/api_op_SetUnhealthyNodeReplacement.go diff --git a/.changelog/359ed06a88084e41b66034d577e49a80.json b/.changelog/359ed06a88084e41b66034d577e49a80.json new file mode 100644 index 00000000000..62c9b72f7f4 --- /dev/null +++ b/.changelog/359ed06a88084e41b66034d577e49a80.json @@ 
-0,0 +1,8 @@ +{ + "id": "359ed06a-8808-4e41-b660-34d577e49a80", + "type": "documentation", + "description": "Doc only update to GetTranscript API reference guide to inform users about presence of events in the chat transcript.", + "modules": [ + "service/connectparticipant" + ] +} \ No newline at end of file diff --git a/.changelog/4ed09e1fbf5b45768a7777c1aa4787d9.json b/.changelog/4ed09e1fbf5b45768a7777c1aa4787d9.json new file mode 100644 index 00000000000..d49cbcd0a6a --- /dev/null +++ b/.changelog/4ed09e1fbf5b45768a7777c1aa4787d9.json @@ -0,0 +1,8 @@ +{ + "id": "4ed09e1f-bf5b-4576-8a77-77c1aa4787d9", + "type": "documentation", + "description": "Documentation-only updates for Lambda to clarify a number of existing actions and properties.", + "modules": [ + "service/lambda" + ] +} \ No newline at end of file diff --git a/.changelog/67228f2bc98944a7bae5fa2de18ec0fd.json b/.changelog/67228f2bc98944a7bae5fa2de18ec0fd.json new file mode 100644 index 00000000000..91ac3726a1a --- /dev/null +++ b/.changelog/67228f2bc98944a7bae5fa2de18ec0fd.json @@ -0,0 +1,8 @@ +{ + "id": "67228f2b-c989-44a7-bae5-fa2de18ec0fd", + "type": "feature", + "description": "adds fine grained control over Unhealthy Node Replacement to Amazon ElasticMapReduce", + "modules": [ + "service/emr" + ] +} \ No newline at end of file diff --git a/.changelog/691024699e854bed9dd3425df53e5038.json b/.changelog/691024699e854bed9dd3425df53e5038.json new file mode 100644 index 00000000000..1bff2a3e7f6 --- /dev/null +++ b/.changelog/691024699e854bed9dd3425df53e5038.json @@ -0,0 +1,8 @@ +{ + "id": "69102469-9e85-4bed-9dd3-425df53e5038", + "type": "documentation", + "description": "Doc only update for a valid option in DB parameter group", + "modules": [ + "service/rds" + ] +} \ No newline at end of file diff --git a/.changelog/7743aba7b2bd45aa811605a1b3a39d5c.json b/.changelog/7743aba7b2bd45aa811605a1b3a39d5c.json new file mode 100644 index 00000000000..f528305ed3a --- /dev/null +++ b/.changelog/7743aba7b2bd45aa811605a1b3a39d5c.json @@ -0,0 +1,8 @@ +{ + "id": "7743aba7-b2bd-45aa-8116-05a1b3a39d5c", + "type": "feature", + "description": "This release adds support for Data Message Extraction for decompressed CloudWatch logs, and to use a custom file extension or time zone for S3 destinations.", + "modules": [ + "service/firehose" + ] +} \ No newline at end of file diff --git a/.changelog/8c1afd5068bc4128b30aa12a58cbbf17.json b/.changelog/8c1afd5068bc4128b30aa12a58cbbf17.json new file mode 100644 index 00000000000..bcfe1da674c --- /dev/null +++ b/.changelog/8c1afd5068bc4128b30aa12a58cbbf17.json @@ -0,0 +1,8 @@ +{ + "id": "8c1afd50-68bc-4128-b30a-a12a58cbbf17", + "type": "feature", + "description": "This release marks phone numbers as sensitive inputs.", + "modules": [ + "service/sns" + ] +} \ No newline at end of file diff --git a/service/auditmanager/internal/endpoints/endpoints.go b/service/auditmanager/internal/endpoints/endpoints.go index aec0349882e..16208f49e24 100644 --- a/service/auditmanager/internal/endpoints/endpoints.go +++ b/service/auditmanager/internal/endpoints/endpoints.go @@ -166,15 +166,75 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "us-east-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "auditmanager-fips.us-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "auditmanager-fips.us-east-1.amazonaws.com", + CredentialScope: 
endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, endpoints.EndpointKey{ Region: "us-east-2", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "auditmanager-fips.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2-fips", + }: endpoints.Endpoint{ + Hostname: "auditmanager-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, endpoints.EndpointKey{ Region: "us-west-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "auditmanager-fips.us-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "auditmanager-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, endpoints.EndpointKey{ Region: "us-west-2", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "auditmanager-fips.us-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-2-fips", + }: endpoints.Endpoint{ + Hostname: "auditmanager-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, }, }, { diff --git a/service/cloudtraildata/internal/endpoints/endpoints.go b/service/cloudtraildata/internal/endpoints/endpoints.go index 876c642faa6..0d6cd851d4d 100644 --- a/service/cloudtraildata/internal/endpoints/endpoints.go +++ b/service/cloudtraildata/internal/endpoints/endpoints.go @@ -187,6 +187,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "eu-west-3", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "me-south-1", }: endpoints.Endpoint{}, diff --git a/service/connectparticipant/api_op_CompleteAttachmentUpload.go b/service/connectparticipant/api_op_CompleteAttachmentUpload.go index c2715092996..2c25309afbe 100644 --- a/service/connectparticipant/api_op_CompleteAttachmentUpload.go +++ b/service/connectparticipant/api_op_CompleteAttachmentUpload.go @@ -12,9 +12,11 @@ import ( ) // Allows you to confirm that the attachment has been uploaded using the -// pre-signed URL provided in StartAttachmentUpload API. ConnectionToken is used -// for invoking this API instead of ParticipantToken . The Amazon Connect -// Participant Service APIs do not use Signature Version 4 authentication (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) +// pre-signed URL provided in StartAttachmentUpload API. A conflict exception is +// thrown when an attachment with that identifier is already being uploaded. +// ConnectionToken is used for invoking this API instead of ParticipantToken . The +// Amazon Connect Participant Service APIs do not use Signature Version 4 +// authentication (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) // . 
func (c *Client) CompleteAttachmentUpload(ctx context.Context, params *CompleteAttachmentUploadInput, optFns ...func(*Options)) (*CompleteAttachmentUploadOutput, error) { if params == nil { diff --git a/service/connectparticipant/api_op_GetTranscript.go b/service/connectparticipant/api_op_GetTranscript.go index 834c9ca604a..665e285dce0 100644 --- a/service/connectparticipant/api_op_GetTranscript.go +++ b/service/connectparticipant/api_op_GetTranscript.go @@ -15,8 +15,17 @@ import ( // Retrieves a transcript of the session, including details about any attachments. // For information about accessing past chat contact transcripts for a persistent // chat, see Enable persistent chat (https://docs.aws.amazon.com/connect/latest/adminguide/chat-persistence.html) -// . ConnectionToken is used for invoking this API instead of ParticipantToken . -// The Amazon Connect Participant Service APIs do not use Signature Version 4 +// . If you have a process that consumes events in the transcript of a chat that +// has ended, note that chat transcripts contain the following event content types +// if the event has occurred during the chat session: +// - application/vnd.amazonaws.connect.event.participant.left +// - application/vnd.amazonaws.connect.event.participant.joined +// - application/vnd.amazonaws.connect.event.chat.ended +// - application/vnd.amazonaws.connect.event.transfer.succeeded +// - application/vnd.amazonaws.connect.event.transfer.failed +// +// ConnectionToken is used for invoking this API instead of ParticipantToken . The +// Amazon Connect Participant Service APIs do not use Signature Version 4 // authentication (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) // . func (c *Client) GetTranscript(ctx context.Context, params *GetTranscriptInput, optFns ...func(*Options)) (*GetTranscriptOutput, error) { diff --git a/service/connectparticipant/api_op_SendEvent.go b/service/connectparticipant/api_op_SendEvent.go index 30fa1df32be..0df5e36b8bd 100644 --- a/service/connectparticipant/api_op_SendEvent.go +++ b/service/connectparticipant/api_op_SendEvent.go @@ -11,9 +11,15 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sends an event. ConnectionToken is used for invoking this API instead of -// ParticipantToken . The Amazon Connect Participant Service APIs do not use -// Signature Version 4 authentication (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) +// The application/vnd.amazonaws.connect.event.connection.acknowledged ContentType +// will no longer be supported starting December 31, 2024. This event has been +// migrated to the CreateParticipantConnection (https://docs.aws.amazon.com/connect-participant/latest/APIReference/API_CreateParticipantConnection.html) +// API using the ConnectParticipant field. Sends an event. Message receipts are +// not supported when there are more than two active participants in the chat. +// Using the SendEvent API for message receipts when a supervisor is barged-in will +// result in a conflict exception. ConnectionToken is used for invoking this API +// instead of ParticipantToken . The Amazon Connect Participant Service APIs do not +// use Signature Version 4 authentication (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) +// . func (c *Client) SendEvent(ctx context.Context, params *SendEventInput, optFns ...func(*Options)) (*SendEventOutput, error) { if params == nil { @@ -39,7 +45,8 @@ type SendEventInput struct { // The content type of the request. 
Supported types are: // - application/vnd.amazonaws.connect.event.typing - // - application/vnd.amazonaws.connect.event.connection.acknowledged + // - application/vnd.amazonaws.connect.event.connection.acknowledged (will be + // deprecated on December 31, 2024) // - application/vnd.amazonaws.connect.event.message.delivered // - application/vnd.amazonaws.connect.event.message.read // diff --git a/service/connectparticipant/types/errors.go b/service/connectparticipant/types/errors.go index fa6fa816df8..f45be08b2f5 100644 --- a/service/connectparticipant/types/errors.go +++ b/service/connectparticipant/types/errors.go @@ -33,7 +33,8 @@ func (e *AccessDeniedException) ErrorCode() string { } func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// An attachment with that identifier is already being uploaded. +// The requested operation conflicts with the current state of a service resource +// associated with the request. type ConflictException struct { Message *string diff --git a/service/ec2/internal/endpoints/endpoints.go b/service/ec2/internal/endpoints/endpoints.go index da932235849..b2d7c51bdaa 100644 --- a/service/ec2/internal/endpoints/endpoints.go +++ b/service/ec2/internal/endpoints/endpoints.go @@ -187,6 +187,15 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "ec2-fips.ca-central-1.amazonaws.com", }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "ec2-fips.ca-west-1.amazonaws.com", + }, endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{}, @@ -226,6 +235,15 @@ var defaultPartitions = endpoints.Partitions{ }, Deprecated: aws.TrueTernary, }, + endpoints.EndpointKey{ + Region: "fips-ca-west-1", + }: endpoints.Endpoint{ + Hostname: "ec2-fips.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + Deprecated: aws.TrueTernary, + }, endpoints.EndpointKey{ Region: "fips-us-east-1", }: endpoints.Endpoint{ diff --git a/service/efs/internal/endpoints/endpoints.go b/service/efs/internal/endpoints/endpoints.go index 4a464794d0a..5eabf7fa873 100644 --- a/service/efs/internal/endpoints/endpoints.go +++ b/service/efs/internal/endpoints/endpoints.go @@ -247,6 +247,15 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", + }, endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{}, @@ -427,6 +436,15 @@ var defaultPartitions = endpoints.Partitions{ }, Deprecated: aws.TrueTernary, }, + endpoints.EndpointKey{ + Region: "fips-ca-west-1", + }: endpoints.Endpoint{ + Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + Deprecated: aws.TrueTernary, + }, endpoints.EndpointKey{ Region: "fips-eu-central-1", }: endpoints.Endpoint{ diff --git a/service/emr/api_op_SetUnhealthyNodeReplacement.go b/service/emr/api_op_SetUnhealthyNodeReplacement.go new file mode 100644 index 00000000000..32ec848dce1 --- /dev/null +++ b/service/emr/api_op_SetUnhealthyNodeReplacement.go @@ -0,0 +1,150 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package emr + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Specify whether to enable unhealthy node replacement, which lets Amazon EMR +// gracefully replace core nodes on a cluster if any nodes become unhealthy. For +// example, a node becomes unhealthy if disk usage is above 90%. If unhealthy node +// replacement is on and TerminationProtected is off, Amazon EMR immediately +// terminates the unhealthy core nodes. To use unhealthy node replacement and +// retain unhealthy core nodes, use SetTerminationProtection to turn on termination +// protection. In such +// cases, Amazon EMR adds the unhealthy nodes to a denylist, reducing job +// interruptions and failures. If unhealthy node replacement is on, Amazon EMR +// notifies YARN and other applications on the cluster to stop scheduling tasks +// with these nodes, moves the data, and then terminates the nodes. For more +// information, see graceful node replacement (https://docs.aws.amazon.com/emr/latest/ManagementGuide/UsingEMR_UnhealthyNodeReplacement.html) +// in the Amazon EMR Management Guide. +func (c *Client) SetUnhealthyNodeReplacement(ctx context.Context, params *SetUnhealthyNodeReplacementInput, optFns ...func(*Options)) (*SetUnhealthyNodeReplacementOutput, error) { + if params == nil { + params = &SetUnhealthyNodeReplacementInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "SetUnhealthyNodeReplacement", params, optFns, c.addOperationSetUnhealthyNodeReplacementMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*SetUnhealthyNodeReplacementOutput) + out.ResultMetadata = metadata + return out, nil +} + +type SetUnhealthyNodeReplacementInput struct { + + // The list of strings that uniquely identify the clusters for which to turn on + // unhealthy node replacement. You can get these identifiers by running the + // RunJobFlow or the DescribeJobFlows operations. + // + // This member is required. + JobFlowIds []string + + // Indicates whether to turn on or turn off graceful unhealthy node replacement. + // + // This member is required. + UnhealthyNodeReplacement *bool + + noSmithyDocumentSerde +} + +type SetUnhealthyNodeReplacementOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationSetUnhealthyNodeReplacementMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson11_serializeOpSetUnhealthyNodeReplacement{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpSetUnhealthyNodeReplacement{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "SetUnhealthyNodeReplacement"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpSetUnhealthyNodeReplacementValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSetUnhealthyNodeReplacement(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opSetUnhealthyNodeReplacement(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "SetUnhealthyNodeReplacement", + } +} diff --git a/service/emr/deserializers.go b/service/emr/deserializers.go index 30a858d1c68..4cbf97d0ff3 100644 --- a/service/emr/deserializers.go +++ b/service/emr/deserializers.go @@ -5188,6 +5188,91 @@ func awsAwsjson11_deserializeOpErrorSetTerminationProtection(response *smithyhtt } } +type awsAwsjson11_deserializeOpSetUnhealthyNodeReplacement struct { +} + +func (*awsAwsjson11_deserializeOpSetUnhealthyNodeReplacement) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpSetUnhealthyNodeReplacement) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata 
middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorSetUnhealthyNodeReplacement(response, &metadata) + } + output := &SetUnhealthyNodeReplacementOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorSetUnhealthyNodeReplacement(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson11_deserializeErrorInternalServerError(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsAwsjson11_deserializeOpSetVisibleToAllUsers struct { } @@ -6884,6 +6969,15 @@ func awsAwsjson11_deserializeDocumentCluster(v **types.Cluster, value interface{ sv.TerminationProtected = ptr.Bool(jtv) } + case "UnhealthyNodeReplacement": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanObject to be of type *bool, got %T instead", value) + } + sv.UnhealthyNodeReplacement = ptr.Bool(jtv) + } + case "VisibleToAllUsers": if value != nil { jtv, ok := value.(bool) @@ -10419,6 +10513,15 @@ func awsAwsjson11_deserializeDocumentJobFlowInstancesDetail(v **types.JobFlowIns sv.TerminationProtected = ptr.Bool(jtv) } + case "UnhealthyNodeReplacement": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanObject to be of type *bool, got %T instead", value) + } + sv.UnhealthyNodeReplacement = ptr.Bool(jtv) + } + default: _, _ = key, value diff --git a/service/emr/generated.json b/service/emr/generated.json index 9ac6f937019..104eab4fad9 100644 --- a/service/emr/generated.json +++ b/service/emr/generated.json @@ -59,6 +59,7 @@ "api_op_RunJobFlow.go", "api_op_SetKeepJobFlowAliveWhenNoSteps.go", "api_op_SetTerminationProtection.go", + "api_op_SetUnhealthyNodeReplacement.go", 
"api_op_SetVisibleToAllUsers.go", "api_op_StartNotebookExecution.go", "api_op_StopNotebookExecution.go", diff --git a/service/emr/serializers.go b/service/emr/serializers.go index e9143d69c65..5bfdcdfc04d 100644 --- a/service/emr/serializers.go +++ b/service/emr/serializers.go @@ -2712,6 +2712,61 @@ func (m *awsAwsjson11_serializeOpSetTerminationProtection) HandleSerialize(ctx c return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpSetUnhealthyNodeReplacement struct { +} + +func (*awsAwsjson11_serializeOpSetUnhealthyNodeReplacement) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpSetUnhealthyNodeReplacement) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*SetUnhealthyNodeReplacementInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("ElasticMapReduce.SetUnhealthyNodeReplacement") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentSetUnhealthyNodeReplacementInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpSetVisibleToAllUsers struct { } @@ -3975,6 +4030,11 @@ func awsAwsjson11_serializeDocumentJobFlowInstancesConfig(v *types.JobFlowInstan ok.Boolean(*v.TerminationProtected) } + if v.UnhealthyNodeReplacement != nil { + ok := object.Key("UnhealthyNodeReplacement") + ok.Boolean(*v.UnhealthyNodeReplacement) + } + return nil } @@ -5875,6 +5935,25 @@ func awsAwsjson11_serializeOpDocumentSetTerminationProtectionInput(v *SetTermina return nil } +func awsAwsjson11_serializeOpDocumentSetUnhealthyNodeReplacementInput(v *SetUnhealthyNodeReplacementInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.JobFlowIds != nil { + ok := object.Key("JobFlowIds") + if err := awsAwsjson11_serializeDocumentXmlStringList(v.JobFlowIds, ok); err != nil { + return err + } + } + + if v.UnhealthyNodeReplacement != nil { + ok := object.Key("UnhealthyNodeReplacement") + ok.Boolean(*v.UnhealthyNodeReplacement) + } + + 
return nil +} + func awsAwsjson11_serializeOpDocumentSetVisibleToAllUsersInput(v *SetVisibleToAllUsersInput, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/service/emr/types/types.go b/service/emr/types/types.go index 71f1faa11f0..9f9f7a2037a 100644 --- a/service/emr/types/types.go +++ b/service/emr/types/types.go @@ -417,6 +417,10 @@ type Cluster struct { // event of a cluster error. TerminationProtected *bool + // Indicates whether Amazon EMR should gracefully replace Amazon EC2 core + // instances that have degraded within the cluster. + UnhealthyNodeReplacement *bool + // Indicates whether the cluster is visible to IAM principals in the Amazon Web // Services account associated with the cluster. When true , IAM principals in the // Amazon Web Services account can perform Amazon EMR cluster actions on the @@ -1756,6 +1760,10 @@ type JobFlowInstancesConfig struct { // error. TerminationProtected *bool + // Indicates whether Amazon EMR should gracefully replace core nodes that have + // degraded within the cluster. + UnhealthyNodeReplacement *bool + noSmithyDocumentSerde } @@ -1821,6 +1829,10 @@ type JobFlowInstancesDetail struct { // error. TerminationProtected *bool + // Indicates whether Amazon EMR should gracefully replace core nodes that have + // degraded within the cluster. + UnhealthyNodeReplacement *bool + noSmithyDocumentSerde } diff --git a/service/emr/validators.go b/service/emr/validators.go index c58a8e52399..2b9544d051b 100644 --- a/service/emr/validators.go +++ b/service/emr/validators.go @@ -810,6 +810,26 @@ func (m *validateOpSetTerminationProtection) HandleInitialize(ctx context.Contex return next.HandleInitialize(ctx, in) } +type validateOpSetUnhealthyNodeReplacement struct { +} + +func (*validateOpSetUnhealthyNodeReplacement) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpSetUnhealthyNodeReplacement) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*SetUnhealthyNodeReplacementInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpSetUnhealthyNodeReplacementInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpSetVisibleToAllUsers struct { } @@ -1090,6 +1110,10 @@ func addOpSetTerminationProtectionValidationMiddleware(stack *middleware.Stack) return stack.Initialize.Add(&validateOpSetTerminationProtection{}, middleware.After) } +func addOpSetUnhealthyNodeReplacementValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpSetUnhealthyNodeReplacement{}, middleware.After) +} + func addOpSetVisibleToAllUsersValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpSetVisibleToAllUsers{}, middleware.After) } @@ -2668,6 +2692,24 @@ func validateOpSetTerminationProtectionInput(v *SetTerminationProtectionInput) e } } +func validateOpSetUnhealthyNodeReplacementInput(v *SetUnhealthyNodeReplacementInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SetUnhealthyNodeReplacementInput"} + if v.JobFlowIds == nil { + invalidParams.Add(smithy.NewErrParamRequired("JobFlowIds")) + } + if v.UnhealthyNodeReplacement == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("UnhealthyNodeReplacement")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpSetVisibleToAllUsersInput(v *SetVisibleToAllUsersInput) error { if v == nil { return nil diff --git a/service/firehose/api_op_CreateDeliveryStream.go b/service/firehose/api_op_CreateDeliveryStream.go index a3f7f1ab4cf..40bbe8b5af4 100644 --- a/service/firehose/api_op_CreateDeliveryStream.go +++ b/service/firehose/api_op_CreateDeliveryStream.go @@ -12,8 +12,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a Kinesis Data Firehose delivery stream. By default, you can create up -// to 50 delivery streams per Amazon Web Services Region. This is an asynchronous +// Creates a Firehose delivery stream. By default, you can create up to 50 +// delivery streams per Amazon Web Services Region. This is an asynchronous // operation that immediately returns. The initial status of the delivery stream is // CREATING . After the delivery stream is created, its status is ACTIVE and it // now accepts data. If the delivery stream creation fails, the status transitions @@ -22,10 +22,10 @@ import ( // use DescribeDeliveryStream . If the status of a delivery stream is // CREATING_FAILED , this status doesn't change, and you can't invoke // CreateDeliveryStream again on it. However, you can invoke the -// DeleteDeliveryStream operation to delete it. A Kinesis Data Firehose delivery -// stream can be configured to receive records directly from providers using -// PutRecord or PutRecordBatch , or it can be configured to use an existing Kinesis -// stream as its source. To specify a Kinesis data stream as input, set the +// DeleteDeliveryStream operation to delete it. A Firehose delivery stream can be +// configured to receive records directly from providers using PutRecord or +// PutRecordBatch , or it can be configured to use an existing Kinesis stream as +// its source. To specify a Kinesis data stream as input, set the // DeliveryStreamType parameter to KinesisStreamAsSource , and provide the Kinesis // stream Amazon Resource Name (ARN) and role ARN in the // KinesisStreamSourceConfiguration parameter. To create a delivery stream with @@ -44,32 +44,30 @@ import ( // SplunkDestinationConfiguration . When you specify S3DestinationConfiguration , // you can also provide the following optional values: BufferingHints, // EncryptionConfiguration , and CompressionFormat . By default, if no -// BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB -// or for 5 minutes, whichever condition is satisfied first. BufferingHints is a -// hint, so there are some cases where the service cannot adhere to these -// conditions strictly. For example, record boundaries might be such that the size -// is a little over or under the configured buffering size. By default, no -// encryption is performed. We strongly recommend that you enable encryption to -// ensure secure data storage in Amazon S3. A few notes about Amazon Redshift as a -// destination: +// BufferingHints value is provided, Firehose buffers data up to 5 MB or for 5 +// minutes, whichever condition is satisfied first. BufferingHints is a hint, so +// there are some cases where the service cannot adhere to these conditions +// strictly. For example, record boundaries might be such that the size is a little +// over or under the configured buffering size. By default, no encryption is +// performed. 
We strongly recommend that you enable encryption to ensure secure +// data storage in Amazon S3. A few notes about Amazon Redshift as a destination: // - An Amazon Redshift destination requires an S3 bucket as intermediate -// location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses -// COPY syntax to load data into an Amazon Redshift table. This is specified in -// the RedshiftDestinationConfiguration.S3Configuration parameter. +// location. Firehose first delivers data to Amazon S3 and then uses COPY syntax +// to load data into an Amazon Redshift table. This is specified in the +// RedshiftDestinationConfiguration.S3Configuration parameter. // - The compression formats SNAPPY or ZIP cannot be specified in // RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift // COPY operation that reads from the S3 bucket doesn't support these compression // formats. // - We strongly recommend that you use the user name and password you provide -// exclusively with Kinesis Data Firehose, and that the permissions for the account -// are restricted for Amazon Redshift INSERT permissions. +// exclusively with Firehose, and that the permissions for the account are +// restricted for Amazon Redshift INSERT permissions. // -// Kinesis Data Firehose assumes the IAM role that is configured as part of the -// destination. The role should allow the Kinesis Data Firehose principal to assume -// the role, and the role should have permissions that allow the service to deliver -// the data. For more information, see Grant Kinesis Data Firehose Access to an -// Amazon S3 Destination (https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) -// in the Amazon Kinesis Data Firehose Developer Guide. +// Firehose assumes the IAM role that is configured as part of the destination. +// The role should allow the Firehose principal to assume the role, and the role +// should have permissions that allow the service to deliver the data. For more +// information, see Grant Firehose Access to an Amazon S3 Destination (https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) +// in the Amazon Firehose Developer Guide. func (c *Client) CreateDeliveryStream(ctx context.Context, params *CreateDeliveryStreamInput, optFns ...func(*Options)) (*CreateDeliveryStreamOutput, error) { if params == nil { params = &CreateDeliveryStreamInput{} diff --git a/service/firehose/api_op_DeleteDeliveryStream.go b/service/firehose/api_op_DeleteDeliveryStream.go index 5b23380455c..3ec6c6a4391 100644 --- a/service/firehose/api_op_DeleteDeliveryStream.go +++ b/service/firehose/api_op_DeleteDeliveryStream.go @@ -11,15 +11,20 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes a delivery stream and its data. To check the state of a delivery -// stream, use DescribeDeliveryStream . You can delete a delivery stream only if it -// is in one of the following states: ACTIVE , DELETING , CREATING_FAILED , or -// DELETING_FAILED . You can't delete a delivery stream that is in the CREATING -// state. While the deletion request is in process, the delivery stream is in the -// DELETING state. While the delivery stream is in the DELETING state, the service -// might continue to accept records, but it doesn't make any guarantees with -// respect to delivering the data. Therefore, as a best practice, first stop any -// applications that are sending records before you delete a delivery stream. +// Deletes a delivery stream and its data. 
You can delete a delivery stream only +// if it is in one of the following states: ACTIVE , DELETING , CREATING_FAILED , +// or DELETING_FAILED . You can't delete a delivery stream that is in the CREATING +// state. To check the state of a delivery stream, use DescribeDeliveryStream . +// DeleteDeliveryStream is an asynchronous API. When an API request to +// DeleteDeliveryStream succeeds, the delivery stream is marked for deletion, and +// it goes into the DELETING state. While the delivery stream is in the DELETING +// state, the service might continue to accept records, but it doesn't make any +// guarantees with respect to delivering the data. Therefore, as a best practice, +// first stop any applications that are sending records before you delete a +// delivery stream. Removal of a delivery stream that is in the DELETING state is +// a low priority operation for the service. A stream may remain in the DELETING +// state for several minutes. Therefore, as a best practice, applications should +// not wait for streams in the DELETING state to be removed. func (c *Client) DeleteDeliveryStream(ctx context.Context, params *DeleteDeliveryStreamInput, optFns ...func(*Options)) (*DeleteDeliveryStreamOutput, error) { if params == nil { params = &DeleteDeliveryStreamInput{} @@ -42,14 +47,13 @@ type DeleteDeliveryStreamInput struct { // This member is required. DeliveryStreamName *string - // Set this to true if you want to delete the delivery stream even if Kinesis Data - // Firehose is unable to retire the grant for the CMK. Kinesis Data Firehose might - // be unable to retire the grant due to a customer error, such as when the CMK or - // the grant are in an invalid state. If you force deletion, you can then use the - // RevokeGrant (https://docs.aws.amazon.com/kms/latest/APIReference/API_RevokeGrant.html) - // operation to revoke the grant you gave to Kinesis Data Firehose. If a failure to - // retire the grant happens due to an Amazon Web Services KMS issue, Kinesis Data - // Firehose keeps retrying the delete operation. The default value is false. + // Set this to true if you want to delete the delivery stream even if Firehose is + // unable to retire the grant for the CMK. Firehose might be unable to retire the + // grant due to a customer error, such as when the CMK or the grant are in an + // invalid state. If you force deletion, you can then use the RevokeGrant (https://docs.aws.amazon.com/kms/latest/APIReference/API_RevokeGrant.html) + // operation to revoke the grant you gave to Firehose. If a failure to retire the + // grant happens due to an Amazon Web Services KMS issue, Firehose keeps retrying + // the delete operation. The default value is false. AllowForceDelete *bool noSmithyDocumentSerde diff --git a/service/firehose/api_op_DescribeDeliveryStream.go b/service/firehose/api_op_DescribeDeliveryStream.go index 931e6efaef5..1036ed4d729 100644 --- a/service/firehose/api_op_DescribeDeliveryStream.go +++ b/service/firehose/api_op_DescribeDeliveryStream.go @@ -43,7 +43,7 @@ type DescribeDeliveryStreamInput struct { DeliveryStreamName *string // The ID of the destination to start returning the destination information. - // Kinesis Data Firehose supports one destination per delivery stream. + // Firehose supports one destination per delivery stream. ExclusiveStartDestinationId *string // The limit on the number of destinations to return. 
You can have one destination diff --git a/service/firehose/api_op_PutRecord.go b/service/firehose/api_op_PutRecord.go index bfbf27fbee6..3415edaeb41 100644 --- a/service/firehose/api_op_PutRecord.go +++ b/service/firehose/api_op_PutRecord.go @@ -12,41 +12,41 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Writes a single data record into an Amazon Kinesis Data Firehose delivery -// stream. To write multiple data records into a delivery stream, use -// PutRecordBatch . Applications using these operations are referred to as -// producers. By default, each delivery stream can take in up to 2,000 transactions -// per second, 5,000 records per second, or 5 MB per second. If you use PutRecord -// and PutRecordBatch , the limits are an aggregate across these two operations for -// each delivery stream. For more information about limits and how to request an -// increase, see Amazon Kinesis Data Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html) -// . Kinesis Data Firehose accumulates and publishes a particular metric for a -// customer account in one minute intervals. It is possible that the bursts of -// incoming bytes/records ingested to a delivery stream last only for a few -// seconds. Due to this, the actual spikes in the traffic might not be fully -// visible in the customer's 1 minute CloudWatch metrics. You must specify the name -// of the delivery stream and the data record when using PutRecord . The data -// record consists of a data blob that can be up to 1,000 KiB in size, and any kind -// of data. For example, it can be a segment from a log file, geographic location -// data, website clickstream data, and so on. Kinesis Data Firehose buffers records -// before delivering them to the destination. To disambiguate the data blobs at the -// destination, a common solution is to use delimiters in the data, such as a -// newline ( \n ) or some other character unique within the data. This allows the -// consumer application to parse individual data items when reading the data from -// the destination. The PutRecord operation returns a RecordId , which is a unique -// string assigned to each record. Producer applications can use this ID for -// purposes such as auditability and investigation. If the PutRecord operation -// throws a ServiceUnavailableException , the API is automatically reinvoked -// (retried) 3 times. If the exception persists, it is possible that the throughput -// limits have been exceeded for the delivery stream. Re-invoking the Put API -// operations (for example, PutRecord and PutRecordBatch) can result in data -// duplicates. For larger data assets, allow for a longer time out before retrying -// Put API operations. Data records sent to Kinesis Data Firehose are stored for 24 -// hours from the time they are added to a delivery stream as it tries to send the -// records to the destination. If the destination is unreachable for more than 24 -// hours, the data is no longer available. Don't concatenate two or more base64 -// strings to form the data fields of your records. Instead, concatenate the raw -// data, then perform base64 encoding. +// Writes a single data record into an Amazon Firehose delivery stream. To write +// multiple data records into a delivery stream, use PutRecordBatch . Applications +// using these operations are referred to as producers. By default, each delivery +// stream can take in up to 2,000 transactions per second, 5,000 records per +// second, or 5 MB per second. 
If you use PutRecord and PutRecordBatch , the limits +// are an aggregate across these two operations for each delivery stream. For more +// information about limits and how to request an increase, see Amazon Firehose +// Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html) . Firehose +// accumulates and publishes a particular metric for a customer account in one +// minute intervals. It is possible that the bursts of incoming bytes/records +// ingested to a delivery stream last only for a few seconds. Due to this, the +// actual spikes in the traffic might not be fully visible in the customer's 1 +// minute CloudWatch metrics. You must specify the name of the delivery stream and +// the data record when using PutRecord . The data record consists of a data blob +// that can be up to 1,000 KiB in size, and any kind of data. For example, it can +// be a segment from a log file, geographic location data, website clickstream +// data, and so on. Firehose buffers records before delivering them to the +// destination. To disambiguate the data blobs at the destination, a common +// solution is to use delimiters in the data, such as a newline ( \n ) or some +// other character unique within the data. This allows the consumer application to +// parse individual data items when reading the data from the destination. The +// PutRecord operation returns a RecordId , which is a unique string assigned to +// each record. Producer applications can use this ID for purposes such as +// auditability and investigation. If the PutRecord operation throws a +// ServiceUnavailableException , the API is automatically reinvoked (retried) 3 +// times. If the exception persists, it is possible that the throughput limits have +// been exceeded for the delivery stream. Re-invoking the Put API operations (for +// example, PutRecord and PutRecordBatch) can result in data duplicates. For larger +// data assets, allow for a longer time out before retrying Put API operations. +// Data records sent to Firehose are stored for 24 hours from the time they are +// added to a delivery stream as it tries to send the records to the destination. +// If the destination is unreachable for more than 24 hours, the data is no longer +// available. Don't concatenate two or more base64 strings to form the data fields +// of your records. Instead, concatenate the raw data, then perform base64 +// encoding. func (c *Client) PutRecord(ctx context.Context, params *PutRecordInput, optFns ...func(*Options)) (*PutRecordOutput, error) { if params == nil { params = &PutRecordInput{} diff --git a/service/firehose/api_op_PutRecordBatch.go b/service/firehose/api_op_PutRecordBatch.go index b463ea5827f..3906c3cf8a0 100644 --- a/service/firehose/api_op_PutRecordBatch.go +++ b/service/firehose/api_op_PutRecordBatch.go @@ -15,21 +15,20 @@ import ( // Writes multiple data records into a delivery stream in a single call, which can // achieve higher throughput per producer than when writing single records. To // write single data records into a delivery stream, use PutRecord . Applications -// using these operations are referred to as producers. Kinesis Data Firehose -// accumulates and publishes a particular metric for a customer account in one -// minute intervals. It is possible that the bursts of incoming bytes/records -// ingested to a delivery stream last only for a few seconds. Due to this, the -// actual spikes in the traffic might not be fully visible in the customer's 1 -// minute CloudWatch metrics. 
For information about service quota, see Amazon -// Kinesis Data Firehose Quota (https://docs.aws.amazon.com/firehose/latest/dev/limits.html) +// using these operations are referred to as producers. Firehose accumulates and +// publishes a particular metric for a customer account in one minute intervals. It +// is possible that the bursts of incoming bytes/records ingested to a delivery +// stream last only for a few seconds. Due to this, the actual spikes in the +// traffic might not be fully visible in the customer's 1 minute CloudWatch +// metrics. For information about service quota, see Amazon Firehose Quota (https://docs.aws.amazon.com/firehose/latest/dev/limits.html) // . Each PutRecordBatch request supports up to 500 records. Each record in the // request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 // MB for the entire request. These limits cannot be changed. You must specify the // name of the delivery stream and the data record when using PutRecord . The data // record consists of a data blob that can be up to 1,000 KB in size, and any kind // of data. For example, it could be a segment from a log file, geographic location -// data, website clickstream data, and so on. Kinesis Data Firehose buffers records -// before delivering them to the destination. To disambiguate the data blobs at the +// data, website clickstream data, and so on. Firehose buffers records before +// delivering them to the destination. To disambiguate the data blobs at the // destination, a common solution is to use delimiters in the data, such as a // newline ( \n ) or some other character unique within the data. This allows the // consumer application to parse individual data items when reading the data from @@ -42,29 +41,29 @@ import ( // the request array using the same ordering, from the top to the bottom. The // response array always includes the same number of records as the request array. // RequestResponses includes both successfully and unsuccessfully processed -// records. Kinesis Data Firehose tries to process all records in each -// PutRecordBatch request. A single record failure does not stop the processing of -// subsequent records. A successfully processed record includes a RecordId value, -// which is unique for the record. An unsuccessfully processed record includes -// ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is -// one of the following values: ServiceUnavailableException or InternalFailure . -// ErrorMessage provides more detailed information about the error. If there is an -// internal server error or a timeout, the write might have completed or it might -// have failed. If FailedPutCount is greater than 0, retry the request, resending -// only those records that might have failed processing. This minimizes the -// possible duplicate records and also reduces the total bytes sent (and -// corresponding charges). We recommend that you handle any duplicates at the -// destination. If PutRecordBatch throws ServiceUnavailableException , the API is -// automatically reinvoked (retried) 3 times. If the exception persists, it is -// possible that the throughput limits have been exceeded for the delivery stream. -// Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) -// can result in data duplicates. For larger data assets, allow for a longer time -// out before retrying Put API operations. 
Data records sent to Kinesis Data -// Firehose are stored for 24 hours from the time they are added to a delivery -// stream as it attempts to send the records to the destination. If the destination -// is unreachable for more than 24 hours, the data is no longer available. Don't -// concatenate two or more base64 strings to form the data fields of your records. -// Instead, concatenate the raw data, then perform base64 encoding. +// records. Firehose tries to process all records in each PutRecordBatch request. +// A single record failure does not stop the processing of subsequent records. A +// successfully processed record includes a RecordId value, which is unique for +// the record. An unsuccessfully processed record includes ErrorCode and +// ErrorMessage values. ErrorCode reflects the type of error, and is one of the +// following values: ServiceUnavailableException or InternalFailure . ErrorMessage +// provides more detailed information about the error. If there is an internal +// server error or a timeout, the write might have completed or it might have +// failed. If FailedPutCount is greater than 0, retry the request, resending only +// those records that might have failed processing. This minimizes the possible +// duplicate records and also reduces the total bytes sent (and corresponding +// charges). We recommend that you handle any duplicates at the destination. If +// PutRecordBatch throws ServiceUnavailableException , the API is automatically +// reinvoked (retried) 3 times. If the exception persists, it is possible that the +// throughput limits have been exceeded for the delivery stream. Re-invoking the +// Put API operations (for example, PutRecord and PutRecordBatch) can result in +// data duplicates. For larger data assets, allow for a longer time out before +// retrying Put API operations. Data records sent to Firehose are stored for 24 +// hours from the time they are added to a delivery stream as it attempts to send +// the records to the destination. If the destination is unreachable for more than +// 24 hours, the data is no longer available. Don't concatenate two or more base64 +// strings to form the data fields of your records. Instead, concatenate the raw +// data, then perform base64 encoding. func (c *Client) PutRecordBatch(ctx context.Context, params *PutRecordBatchInput, optFns ...func(*Options)) (*PutRecordBatchOutput, error) { if params == nil { params = &PutRecordBatchInput{} diff --git a/service/firehose/api_op_StartDeliveryStreamEncryption.go b/service/firehose/api_op_StartDeliveryStreamEncryption.go index 1bca0f034f9..5c742be3650 100644 --- a/service/firehose/api_op_StartDeliveryStreamEncryption.go +++ b/service/firehose/api_op_StartDeliveryStreamEncryption.go @@ -13,9 +13,9 @@ import ( ) // Enables server-side encryption (SSE) for the delivery stream. This operation is -// asynchronous. It returns immediately. When you invoke it, Kinesis Data Firehose -// first sets the encryption status of the stream to ENABLING , and then to ENABLED -// . The encryption status of a delivery stream is the Status property in +// asynchronous. It returns immediately. When you invoke it, Firehose first sets +// the encryption status of the stream to ENABLING , and then to ENABLED . The +// encryption status of a delivery stream is the Status property in // DeliveryStreamEncryptionConfiguration . If the operation fails, the encryption // status changes to ENABLING_FAILED . 
You can continue to read and write data to // your delivery stream while the encryption status is ENABLING , but the data is @@ -27,22 +27,21 @@ import ( // DescribeDeliveryStream . Even if encryption is currently enabled for a delivery // stream, you can still invoke this operation on it to change the ARN of the CMK // or both its type and ARN. If you invoke this method to change the CMK, and the -// old CMK is of type CUSTOMER_MANAGED_CMK , Kinesis Data Firehose schedules the -// grant it had on the old CMK for retirement. If the new CMK is of type -// CUSTOMER_MANAGED_CMK , Kinesis Data Firehose creates a grant that enables it to -// use the new CMK to encrypt and decrypt data and to manage the grant. For the KMS -// grant creation to be successful, Kinesis Data Firehose APIs -// StartDeliveryStreamEncryption and CreateDeliveryStream should not be called -// with session credentials that are more than 6 hours old. If a delivery stream -// already has encryption enabled and then you invoke this operation to change the -// ARN of the CMK or both its type and ARN and you get ENABLING_FAILED , this only -// means that the attempt to change the CMK failed. In this case, encryption -// remains enabled with the old CMK. If the encryption status of your delivery -// stream is ENABLING_FAILED , you can invoke this operation again with a valid -// CMK. The CMK must be enabled and the key policy mustn't explicitly deny the -// permission for Kinesis Data Firehose to invoke KMS encrypt and decrypt -// operations. You can enable SSE for a delivery stream only if it's a delivery -// stream that uses DirectPut as its source. The StartDeliveryStreamEncryption and +// old CMK is of type CUSTOMER_MANAGED_CMK , Firehose schedules the grant it had on +// the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK , +// Firehose creates a grant that enables it to use the new CMK to encrypt and +// decrypt data and to manage the grant. For the KMS grant creation to be +// successful, Firehose APIs StartDeliveryStreamEncryption and CreateDeliveryStream +// should not be called with session credentials that are more than 6 hours old. If +// a delivery stream already has encryption enabled and then you invoke this +// operation to change the ARN of the CMK or both its type and ARN and you get +// ENABLING_FAILED , this only means that the attempt to change the CMK failed. In +// this case, encryption remains enabled with the old CMK. If the encryption status +// of your delivery stream is ENABLING_FAILED , you can invoke this operation again +// with a valid CMK. The CMK must be enabled and the key policy mustn't explicitly +// deny the permission for Firehose to invoke KMS encrypt and decrypt operations. +// You can enable SSE for a delivery stream only if it's a delivery stream that +// uses DirectPut as its source. The StartDeliveryStreamEncryption and // StopDeliveryStreamEncryption operations have a combined limit of 25 calls per // delivery stream per 24 hours. For example, you reach the limit if you call // StartDeliveryStreamEncryption 13 times and StopDeliveryStreamEncryption 12 diff --git a/service/firehose/api_op_StopDeliveryStreamEncryption.go b/service/firehose/api_op_StopDeliveryStreamEncryption.go index 41de8ebc208..d74ce792ddd 100644 --- a/service/firehose/api_op_StopDeliveryStreamEncryption.go +++ b/service/firehose/api_op_StopDeliveryStreamEncryption.go @@ -12,18 +12,18 @@ import ( ) // Disables server-side encryption (SSE) for the delivery stream. 
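The asynchronous ENABLING to ENABLED transition described above can be observed through DescribeDeliveryStream. The sketch below is illustrative only; the stream name, the KMS key ARN, and the fixed-interval polling loop are assumptions rather than anything prescribed by this change.

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/firehose"
    "github.com/aws/aws-sdk-go-v2/service/firehose/types"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := firehose.NewFromConfig(cfg)
    stream := aws.String("my-delivery-stream") // assumed name

    // Start SSE with a customer managed CMK (placeholder key ARN).
    _, err = client.StartDeliveryStreamEncryption(ctx, &firehose.StartDeliveryStreamEncryptionInput{
        DeliveryStreamName: stream,
        DeliveryStreamEncryptionConfigurationInput: &types.DeliveryStreamEncryptionConfigurationInput{
            KeyType: types.KeyTypeCustomerManagedCmk,
            KeyARN:  aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"),
        },
    })
    if err != nil {
        log.Fatal(err)
    }

    // The call returns immediately; poll the Status property until it leaves ENABLING.
    for {
        out, err := client.DescribeDeliveryStream(ctx, &firehose.DescribeDeliveryStreamInput{
            DeliveryStreamName: stream,
        })
        if err != nil {
            log.Fatal(err)
        }
        status := out.DeliveryStreamDescription.DeliveryStreamEncryptionConfiguration.Status
        if status != types.DeliveryStreamEncryptionStatusEnabling {
            fmt.Println("encryption status:", status)
            break
        }
        time.Sleep(5 * time.Second)
    }
}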
This operation -// is asynchronous. It returns immediately. When you invoke it, Kinesis Data -// Firehose first sets the encryption status of the stream to DISABLING , and then -// to DISABLED . You can continue to read and write data to your stream while its -// status is DISABLING . It can take up to 5 seconds after the encryption status -// changes to DISABLED before all records written to the delivery stream are no -// longer subject to encryption. To find out whether a record or a batch of records -// was encrypted, check the response elements PutRecordOutput$Encrypted and +// is asynchronous. It returns immediately. When you invoke it, Firehose first sets +// the encryption status of the stream to DISABLING , and then to DISABLED . You +// can continue to read and write data to your stream while its status is DISABLING +// . It can take up to 5 seconds after the encryption status changes to DISABLED +// before all records written to the delivery stream are no longer subject to +// encryption. To find out whether a record or a batch of records was encrypted, +// check the response elements PutRecordOutput$Encrypted and // PutRecordBatchOutput$Encrypted , respectively. To check the encryption state of // a delivery stream, use DescribeDeliveryStream . If SSE is enabled using a -// customer managed CMK and then you invoke StopDeliveryStreamEncryption , Kinesis -// Data Firehose schedules the related KMS grant for retirement and then retires it -// after it ensures that it is finished delivering records to the destination. The +// customer managed CMK and then you invoke StopDeliveryStreamEncryption , Firehose +// schedules the related KMS grant for retirement and then retires it after it +// ensures that it is finished delivering records to the destination. The // StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations have // a combined limit of 25 calls per delivery stream per 24 hours. For example, you // reach the limit if you call StartDeliveryStreamEncryption 13 times and diff --git a/service/firehose/api_op_UpdateDestination.go b/service/firehose/api_op_UpdateDestination.go index aa38d4d7dd9..13c515b5a8f 100644 --- a/service/firehose/api_op_UpdateDestination.go +++ b/service/firehose/api_op_UpdateDestination.go @@ -22,20 +22,20 @@ import ( // usually effective within a few minutes. Switching between Amazon OpenSearch // Service and other services is not supported. For an Amazon OpenSearch Service // destination, you can only update to another Amazon OpenSearch Service -// destination. If the destination type is the same, Kinesis Data Firehose merges -// the configuration parameters specified with the destination configuration that +// destination. If the destination type is the same, Firehose merges the +// configuration parameters specified with the destination configuration that // already exists on the delivery stream. If any of the parameters are not // specified in the call, the existing values are retained. For example, in the // Amazon S3 destination, if EncryptionConfiguration is not specified, then the // existing EncryptionConfiguration is maintained on the destination. If the // destination type is not the same, for example, changing the destination from -// Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any -// parameters. In this case, all parameters must be specified. Kinesis Data -// Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and -// conflicting merges. 
This is a required field, and the service updates the -// configuration only if the existing configuration has a version ID that matches. -// After the update is applied successfully, the version ID is updated, and can be -// retrieved using DescribeDeliveryStream . Use the new version ID to set +// Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. In this +// case, all parameters must be specified. Firehose uses +// CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. +// This is a required field, and the service updates the configuration only if the +// existing configuration has a version ID that matches. After the update is +// applied successfully, the version ID is updated, and can be retrieved using +// DescribeDeliveryStream . Use the new version ID to set // CurrentDeliveryStreamVersionId in the next call. func (c *Client) UpdateDestination(ctx context.Context, params *UpdateDestinationInput, optFns ...func(*Options)) (*UpdateDestinationOutput, error) { if params == nil { diff --git a/service/firehose/deserializers.go b/service/firehose/deserializers.go index e77327360c5..ae8891e437b 100644 --- a/service/firehose/deserializers.go +++ b/service/firehose/deserializers.go @@ -3258,6 +3258,15 @@ func awsAwsjson11_deserializeDocumentExtendedS3DestinationDescription(v **types. sv.CompressionFormat = types.CompressionFormat(jtv) } + case "CustomTimeZone": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomTimeZone to be of type string, got %T instead", value) + } + sv.CustomTimeZone = ptr.String(jtv) + } + case "DataFormatConversionConfiguration": if err := awsAwsjson11_deserializeDocumentDataFormatConversionConfiguration(&sv.DataFormatConversionConfiguration, value); err != nil { return err @@ -3282,6 +3291,15 @@ func awsAwsjson11_deserializeDocumentExtendedS3DestinationDescription(v **types. sv.ErrorOutputPrefix = ptr.String(jtv) } + case "FileExtension": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected FileExtension to be of type string, got %T instead", value) + } + sv.FileExtension = ptr.String(jtv) + } + case "Prefix": if value != nil { jtv, ok := value.(string) diff --git a/service/firehose/doc.go b/service/firehose/doc.go index 3508b8fc23b..efdd14a571a 100644 --- a/service/firehose/doc.go +++ b/service/firehose/doc.go @@ -3,8 +3,8 @@ // Package firehose provides the API client, operations, and parameter types for // Amazon Kinesis Firehose. // -// Amazon Kinesis Data Firehose API Reference Amazon Kinesis Data Firehose is a -// fully managed service that delivers real-time streaming data to destinations -// such as Amazon Simple Storage Service (Amazon S3), Amazon OpenSearch Service, -// Amazon Redshift, Splunk, and various other supportd destinations. +// Amazon Data Firehose Amazon Data Firehose is a fully managed service that +// delivers real-time streaming data to destinations such as Amazon Simple Storage +// Service (Amazon S3), Amazon OpenSearch Service, Amazon Redshift, Splunk, and +// various other supported destinations.
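The CurrentDeliveryStreamVersionId handshake described above amounts to a read-modify-write flow. A rough sketch follows; the stream name, the assumption that a single extended S3 destination already exists, and the Prefix value being updated are all placeholders, not part of this patch.

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/firehose"
    "github.com/aws/aws-sdk-go-v2/service/firehose/types"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := firehose.NewFromConfig(cfg)
    stream := aws.String("my-delivery-stream") // assumed name

    // Read the current version ID and destination ID first.
    desc, err := client.DescribeDeliveryStream(ctx, &firehose.DescribeDeliveryStreamInput{
        DeliveryStreamName: stream,
    })
    if err != nil {
        log.Fatal(err)
    }
    d := desc.DeliveryStreamDescription

    // Merge a partial update into the existing extended S3 destination; unspecified
    // fields keep their current values because the destination type is unchanged.
    _, err = client.UpdateDestination(ctx, &firehose.UpdateDestinationInput{
        DeliveryStreamName:             stream,
        CurrentDeliveryStreamVersionId: d.VersionId,
        DestinationId:                  d.Destinations[0].DestinationId, // assumes one existing destination
        ExtendedS3DestinationUpdate: &types.ExtendedS3DestinationUpdate{
            Prefix: aws.String("events/"), // illustrative value
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}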
package firehose diff --git a/service/firehose/serializers.go b/service/firehose/serializers.go index 7f2f8037a39..19480ef1d64 100644 --- a/service/firehose/serializers.go +++ b/service/firehose/serializers.go @@ -1466,6 +1466,11 @@ func awsAwsjson11_serializeDocumentExtendedS3DestinationConfiguration(v *types.E ok.String(string(v.CompressionFormat)) } + if v.CustomTimeZone != nil { + ok := object.Key("CustomTimeZone") + ok.String(*v.CustomTimeZone) + } + if v.DataFormatConversionConfiguration != nil { ok := object.Key("DataFormatConversionConfiguration") if err := awsAwsjson11_serializeDocumentDataFormatConversionConfiguration(v.DataFormatConversionConfiguration, ok); err != nil { @@ -1492,6 +1497,11 @@ func awsAwsjson11_serializeDocumentExtendedS3DestinationConfiguration(v *types.E ok.String(*v.ErrorOutputPrefix) } + if v.FileExtension != nil { + ok := object.Key("FileExtension") + ok.String(*v.FileExtension) + } + if v.Prefix != nil { ok := object.Key("Prefix") ok.String(*v.Prefix) @@ -1552,6 +1562,11 @@ func awsAwsjson11_serializeDocumentExtendedS3DestinationUpdate(v *types.Extended ok.String(string(v.CompressionFormat)) } + if v.CustomTimeZone != nil { + ok := object.Key("CustomTimeZone") + ok.String(*v.CustomTimeZone) + } + if v.DataFormatConversionConfiguration != nil { ok := object.Key("DataFormatConversionConfiguration") if err := awsAwsjson11_serializeDocumentDataFormatConversionConfiguration(v.DataFormatConversionConfiguration, ok); err != nil { @@ -1578,6 +1593,11 @@ func awsAwsjson11_serializeDocumentExtendedS3DestinationUpdate(v *types.Extended ok.String(*v.ErrorOutputPrefix) } + if v.FileExtension != nil { + ok := object.Key("FileExtension") + ok.String(*v.FileExtension) + } + if v.Prefix != nil { ok := object.Key("Prefix") ok.String(*v.Prefix) diff --git a/service/firehose/types/enums.go b/service/firehose/types/enums.go index 19b1008a116..216c58589c3 100644 --- a/service/firehose/types/enums.go +++ b/service/firehose/types/enums.go @@ -461,6 +461,7 @@ const ( ProcessorParameterNameSubRecordType ProcessorParameterName = "SubRecordType" ProcessorParameterNameDelimiter ProcessorParameterName = "Delimiter" ProcessorParameterNameCompressionFormat ProcessorParameterName = "CompressionFormat" + ProcessorParameterNameDataMessageExtraction ProcessorParameterName = "DataMessageExtraction" ) // Values returns all known values for ProcessorParameterName. 
Note that this can @@ -478,6 +479,7 @@ func (ProcessorParameterName) Values() []ProcessorParameterName { "SubRecordType", "Delimiter", "CompressionFormat", + "DataMessageExtraction", } } @@ -487,6 +489,7 @@ type ProcessorType string const ( ProcessorTypeRecordDeAggregation ProcessorType = "RecordDeAggregation" ProcessorTypeDecompression ProcessorType = "Decompression" + ProcessorTypeCloudWatchLogProcessing ProcessorType = "CloudWatchLogProcessing" ProcessorTypeLambda ProcessorType = "Lambda" ProcessorTypeMetadataExtraction ProcessorType = "MetadataExtraction" ProcessorTypeAppendDelimiterToRecord ProcessorType = "AppendDelimiterToRecord" @@ -499,6 +502,7 @@ func (ProcessorType) Values() []ProcessorType { return []ProcessorType{ "RecordDeAggregation", "Decompression", + "CloudWatchLogProcessing", "Lambda", "MetadataExtraction", "AppendDelimiterToRecord", diff --git a/service/firehose/types/errors.go b/service/firehose/types/errors.go index af754ad4cdf..229731ee86e 100644 --- a/service/firehose/types/errors.go +++ b/service/firehose/types/errors.go @@ -60,9 +60,9 @@ func (e *InvalidArgumentException) ErrorCode() string { } func (e *InvalidArgumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// Kinesis Data Firehose throws this exception when an attempt to put records or -// to start or stop delivery stream encryption fails. This happens when the KMS -// service throws one of the following exception types: AccessDeniedException , +// Firehose throws this exception when an attempt to put records or to start or +// stop delivery stream encryption fails. This happens when the KMS service throws +// one of the following exception types: AccessDeniedException , // InvalidStateException , DisabledException , or NotFoundException . type InvalidKMSResourceException struct { Message *string @@ -201,7 +201,7 @@ func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smit // The service is unavailable. Back off and retry the operation. If you continue // to see the exception, throughput limits for the delivery stream may have been // exceeded. For more information about limits and how to request an increase, see -// Amazon Kinesis Data Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html) +// Amazon Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html) // . type ServiceUnavailableException struct { Message *string diff --git a/service/firehose/types/types.go b/service/firehose/types/types.go index 84632d8ae79..8f581547075 100644 --- a/service/firehose/types/types.go +++ b/service/firehose/types/types.go @@ -34,9 +34,9 @@ type AmazonOpenSearchServerlessDestinationConfiguration struct { // This member is required. IndexName *string - // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data - // Firehose for calling the Serverless offering for Amazon OpenSearch Service - // Configuration API and for indexing documents. + // The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for + // calling the Serverless offering for Amazon OpenSearch Service Configuration API + // and for indexing documents. // // This member is required. RoleARN *string @@ -60,18 +60,17 @@ type AmazonOpenSearchServerlessDestinationConfiguration struct { // Describes a data processing configuration. ProcessingConfiguration *ProcessingConfiguration - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to the Serverless offering for Amazon OpenSearch Service. 
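The new CloudWatchLogProcessing processor type and DataMessageExtraction parameter added in this hunk would be wired into a destination's ProcessingConfiguration roughly as below. This is an assumption-laden sketch; in particular, the "true" parameter value is illustrative and not taken from this patch.

package example

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/firehose/types"
)

// Hypothetical processing configuration that asks Firehose to extract the message
// field from CloudWatch Logs records before delivery.
var processing = types.ProcessingConfiguration{
    Enabled: aws.Bool(true),
    Processors: []types.Processor{
        {
            Type: types.ProcessorTypeCloudWatchLogProcessing,
            Parameters: []types.ProcessorParameter{
                {
                    ParameterName:  types.ProcessorParameterNameDataMessageExtraction,
                    ParameterValue: aws.String("true"), // assumed value
                },
            },
        },
    },
}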
The default value is - // 300 (5 minutes). + // The retry behavior in case Firehose is unable to deliver documents to the + // Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 + // minutes). RetryOptions *AmazonOpenSearchServerlessRetryOptions // Defines how documents should be delivered to Amazon S3. When it is set to - // FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not - // be indexed to the configured Amazon S3 destination, with - // AmazonOpenSearchService-failed/ appended to the key prefix. When set to - // AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, - // and also writes failed documents with AmazonOpenSearchService-failed/ appended - // to the prefix. + // FailedDocumentsOnly, Firehose writes any documents that could not be indexed to + // the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ + // appended to the key prefix. When set to AllDocuments, Firehose delivers all + // incoming records to Amazon S3, and also writes failed documents with + // AmazonOpenSearchService-failed/ appended to the prefix. S3BackupMode AmazonOpenSearchServerlessS3BackupMode // The details of the VPC of the Amazon OpenSearch or Amazon OpenSearch Serverless @@ -140,14 +139,14 @@ type AmazonOpenSearchServerlessDestinationUpdate struct { // Describes a data processing configuration. ProcessingConfiguration *ProcessingConfiguration - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to the Serverless offering for Amazon OpenSearch Service. The default value is - // 300 (5 minutes). + // The retry behavior in case Firehose is unable to deliver documents to the + // Serverless offering for Amazon OpenSearch Service. The default value is 300 (5 + // minutes). RetryOptions *AmazonOpenSearchServerlessRetryOptions - // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data - // Firehose for calling the Serverless offering for Amazon OpenSearch Service - // Configuration API and for indexing documents. + // The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for + // calling the Serverless offering for Amazon OpenSearch Service Configuration API + // and for indexing documents. RoleARN *string // Describes an update for a destination in Amazon S3. @@ -156,15 +155,15 @@ type AmazonOpenSearchServerlessDestinationUpdate struct { noSmithyDocumentSerde } -// Configures retry behavior in case Kinesis Data Firehose is unable to deliver -// documents to the Serverless offering for Amazon OpenSearch Service. +// Configures retry behavior in case Firehose is unable to deliver documents to +// the Serverless offering for Amazon OpenSearch Service. type AmazonOpenSearchServerlessRetryOptions struct { // After an initial failure to deliver to the Serverless offering for Amazon - // OpenSearch Service, the total amount of time during which Kinesis Data Firehose - // retries delivery (including the first attempt). After this time has elapsed, the - // failed documents are written to Amazon S3. Default value is 300 seconds (5 - // minutes). A value of 0 (zero) results in no retries. + // OpenSearch Service, the total amount of time during which Firehose retries + // delivery (including the first attempt). After this time has elapsed, the failed + // documents are written to Amazon S3. Default value is 300 seconds (5 minutes). A + // value of 0 (zero) results in no retries. 
DurationInSeconds *int32 noSmithyDocumentSerde @@ -196,9 +195,9 @@ type AmazonopensearchserviceDestinationConfiguration struct { // This member is required. IndexName *string - // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data - // Firehose for calling the Amazon OpenSearch Service Configuration API and for - // indexing documents. + // The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for + // calling the Amazon OpenSearch Service Configuration API and for indexing + // documents. // // This member is required. RoleARN *string @@ -220,8 +219,7 @@ type AmazonopensearchserviceDestinationConfiguration struct { ClusterEndpoint *string // Indicates the method for setting up document ID. The supported methods are - // Kinesis Data Firehose generated document ID and OpenSearch Service generated - // document ID. + // Firehose generated document ID and OpenSearch Service generated document ID. DocumentIdOptions *DocumentIdOptions // The ARN of the Amazon OpenSearch Service domain. The IAM role must have @@ -236,23 +234,21 @@ type AmazonopensearchserviceDestinationConfiguration struct { // Describes a data processing configuration. ProcessingConfiguration *ProcessingConfiguration - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to Amazon OpenSearch Service. The default value is 300 (5 minutes). + // The retry behavior in case Firehose is unable to deliver documents to Amazon + // OpenSearch Service. The default value is 300 (5 minutes). RetryOptions *AmazonopensearchserviceRetryOptions // Defines how documents should be delivered to Amazon S3. When it is set to - // FailedDocumentsOnly, Kinesis Data Firehose writes any documents that could not - // be indexed to the configured Amazon S3 destination, with - // AmazonOpenSearchService-failed/ appended to the key prefix. When set to - // AllDocuments, Kinesis Data Firehose delivers all incoming records to Amazon S3, - // and also writes failed documents with AmazonOpenSearchService-failed/ appended - // to the prefix. + // FailedDocumentsOnly, Firehose writes any documents that could not be indexed to + // the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ + // appended to the key prefix. When set to AllDocuments, Firehose delivers all + // incoming records to Amazon S3, and also writes failed documents with + // AmazonOpenSearchService-failed/ appended to the prefix. S3BackupMode AmazonopensearchserviceS3BackupMode // The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be // only one type per index. If you try to specify a new type for an existing index - // that already has another type, Kinesis Data Firehose returns an error during run - // time. + // that already has another type, Firehose returns an error during run time. TypeName *string // The details of the VPC of the Amazon OpenSearch or Amazon OpenSearch Serverless @@ -271,14 +267,13 @@ type AmazonopensearchserviceDestinationDescription struct { // Describes the Amazon CloudWatch logging options for your delivery stream. CloudWatchLoggingOptions *CloudWatchLoggingOptions - // The endpoint to use when communicating with the cluster. Kinesis Data Firehose - // uses either this ClusterEndpoint or the DomainARN field to send data to Amazon - // OpenSearch Service. + // The endpoint to use when communicating with the cluster. Firehose uses either + // this ClusterEndpoint or the DomainARN field to send data to Amazon OpenSearch + // Service. 
ClusterEndpoint *string // Indicates the method for setting up document ID. The supported methods are - // Kinesis Data Firehose generated document ID and OpenSearch Service generated - // document ID. + // Firehose generated document ID and OpenSearch Service generated document ID. DocumentIdOptions *DocumentIdOptions // The ARN of the Amazon OpenSearch Service domain. @@ -331,8 +326,7 @@ type AmazonopensearchserviceDestinationUpdate struct { ClusterEndpoint *string // Indicates the method for setting up document ID. The supported methods are - // Kinesis Data Firehose generated document ID and OpenSearch Service generated - // document ID. + // Firehose generated document ID and OpenSearch Service generated document ID. DocumentIdOptions *DocumentIdOptions // The ARN of the Amazon OpenSearch Service domain. The IAM role must have @@ -350,13 +344,13 @@ type AmazonopensearchserviceDestinationUpdate struct { // Describes a data processing configuration. ProcessingConfiguration *ProcessingConfiguration - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to Amazon OpenSearch Service. The default value is 300 (5 minutes). + // The retry behavior in case Firehose is unable to deliver documents to Amazon + // OpenSearch Service. The default value is 300 (5 minutes). RetryOptions *AmazonopensearchserviceRetryOptions - // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data - // Firehose for calling the Amazon OpenSearch Service Configuration API and for - // indexing documents. + // The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for + // calling the Amazon OpenSearch Service Configuration API and for indexing + // documents. RoleARN *string // Describes an update for a destination in Amazon S3. @@ -364,25 +358,25 @@ type AmazonopensearchserviceDestinationUpdate struct { // The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be // only one type per index. If you try to specify a new type for an existing index - // that already has another type, Kinesis Data Firehose returns an error during - // runtime. If you upgrade Elasticsearch from 6.x to 7.x and don’t update your - // delivery stream, Kinesis Data Firehose still delivers data to Elasticsearch with - // the old index name and type name. If you want to update your delivery stream - // with a new index name, provide an empty string for TypeName. + // that already has another type, Firehose returns an error during runtime. If you + // upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, + // Firehose still delivers data to Elasticsearch with the old index name and type + // name. If you want to update your delivery stream with a new index name, provide + // an empty string for TypeName. TypeName *string noSmithyDocumentSerde } -// Configures retry behavior in case Kinesis Data Firehose is unable to deliver -// documents to Amazon OpenSearch Service. +// Configures retry behavior in case Firehose is unable to deliver documents to +// Amazon OpenSearch Service. type AmazonopensearchserviceRetryOptions struct { // After an initial failure to deliver to Amazon OpenSearch Service, the total - // amount of time during which Kinesis Data Firehose retries delivery (including - // the first attempt). After this time has elapsed, the failed documents are - // written to Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 - // (zero) results in no retries. 
+ // amount of time during which Firehose retries delivery (including the first + // attempt). After this time has elapsed, the failed documents are written to + // Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results + // in no retries. DurationInSeconds *int32 noSmithyDocumentSerde @@ -405,10 +399,10 @@ type AuthenticationConfiguration struct { } // Describes hints for the buffering to perform before delivering data to the -// destination. These options are treated as hints, and therefore Kinesis Data -// Firehose might choose to use different values when it is optimal. The SizeInMBs -// and IntervalInSeconds parameters are optional. However, if specify a value for -// one of them, you must also provide a value for the other. +// destination. These options are treated as hints, and therefore Firehose might +// choose to use different values when it is optimal. The SizeInMBs and +// IntervalInSeconds parameters are optional. However, if you specify a value for one +// of them, you must also provide a value for the other. type BufferingHints struct { // Buffer incoming data for the specified period of time, in seconds, before @@ -457,15 +451,15 @@ type CopyCommand struct { // Optional parameters to use with the Amazon Redshift COPY command. For more // information, see the "Optional Parameters" section of Amazon Redshift COPY // command (https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html) . Some - // possible examples that would apply to Kinesis Data Firehose are as follows: - // delimiter '\t' lzop; - fields are delimited with "\t" (TAB character) and - // compressed using lzop. delimiter '|' - fields are delimited with "|" (this is - // the default delimiter). delimiter '|' escape - the delimiter should be escaped. - // fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - - // fields are fixed width in the source, with each width specified after every - // column in the table. JSON 's3://mybucket/jsonpaths.txt' - data is in JSON - // format, and the path specified is the format of the data. For more examples, see - // Amazon Redshift COPY command examples (https://docs.aws.amazon.com/redshift/latest/dg/r_COPY_command_examples.html) + // possible examples that would apply to Firehose are as follows: delimiter '\t' + // lzop; - fields are delimited with "\t" (TAB character) and compressed using + // lzop. delimiter '|' - fields are delimited with "|" (this is the default + // delimiter). delimiter '|' escape - the delimiter should be escaped. fixedwidth + // 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' - fields are + // fixed width in the source, with each width specified after every column in the + // table. JSON 's3://mybucket/jsonpaths.txt' - data is in JSON format, and the + // path specified is the format of the data. For more examples, see Amazon + // Redshift COPY command examples (https://docs.aws.amazon.com/redshift/latest/dg/r_COPY_command_examples.html) // . CopyOptions *string @@ -475,12 +469,12 @@ type CopyCommand struct { noSmithyDocumentSerde } -// Specifies that you want Kinesis Data Firehose to convert data from the JSON -// format to the Parquet or ORC format before writing it to Amazon S3. Kinesis Data -// Firehose uses the serializer and deserializer that you specify, in addition to -// the column information from the Amazon Web Services Glue table, to deserialize -// your input data from JSON and then serialize it to the Parquet or ORC format.
-// For more information, see Kinesis Data Firehose Record Format Conversion (https://docs.aws.amazon.com/firehose/latest/dev/record-format-conversion.html) +// Specifies that you want Firehose to convert data from the JSON format to the +// Parquet or ORC format before writing it to Amazon S3. Firehose uses the +// serializer and deserializer that you specify, in addition to the column +// information from the Amazon Web Services Glue table, to deserialize your input +// data from JSON and then serialize it to the Parquet or ORC format. For more +// information, see Firehose Record Format Conversion (https://docs.aws.amazon.com/firehose/latest/dev/record-format-conversion.html) // . type DataFormatConversionConfiguration struct { @@ -488,14 +482,13 @@ type DataFormatConversionConfiguration struct { // while preserving the configuration details. Enabled *bool - // Specifies the deserializer that you want Kinesis Data Firehose to use to - // convert the format of your data from JSON. This parameter is required if Enabled - // is set to true. + // Specifies the deserializer that you want Firehose to use to convert the format + // of your data from JSON. This parameter is required if Enabled is set to true. InputFormatConfiguration *InputFormatConfiguration - // Specifies the serializer that you want Kinesis Data Firehose to use to convert - // the format of your data to the Parquet or ORC format. This parameter is required - // if Enabled is set to true. + // Specifies the serializer that you want Firehose to use to convert the format of + // your data to the Parquet or ORC format. This parameter is required if Enabled + // is set to true. OutputFormatConfiguration *OutputFormatConfiguration // Specifies the Amazon Web Services Glue Data Catalog table that contains the @@ -615,19 +608,19 @@ type DeliveryStreamEncryptionConfigurationInput struct { // default setting is Amazon Web Services_OWNED_CMK . For more information about // CMKs, see Customer Master Keys (CMKs) (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys) // . When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with - // KeyType set to CUSTOMER_MANAGED_CMK, Kinesis Data Firehose invokes the Amazon - // KMS operation CreateGrant (https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateGrant.html) - // to create a grant that allows the Kinesis Data Firehose service to use the - // customer managed CMK to perform encryption and decryption. Kinesis Data Firehose - // manages that grant. When you invoke StartDeliveryStreamEncryption to change the - // CMK for a delivery stream that is encrypted with a customer managed CMK, Kinesis - // Data Firehose schedules the grant it had on the old CMK for retirement. You can - // use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If - // a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this - // limit, Kinesis Data Firehose throws a LimitExceededException . To encrypt your - // delivery stream, use symmetric CMKs. Kinesis Data Firehose doesn't support - // asymmetric CMKs. 
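Putting the pieces named above together, a record format conversion block pairs a Deserializer with a Serializer and a Glue schema reference. The sketch below is illustrative only; the Glue database, table, and role values are placeholders and are not part of this change.

package example

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/firehose/types"
)

// Illustrative JSON-to-Parquet conversion configuration for an extended S3 destination.
var conversion = types.DataFormatConversionConfiguration{
    Enabled: aws.Bool(true),
    InputFormatConfiguration: &types.InputFormatConfiguration{
        Deserializer: &types.Deserializer{
            OpenXJsonSerDe: &types.OpenXJsonSerDe{
                CaseInsensitive: aws.Bool(true),
            },
        },
    },
    OutputFormatConfiguration: &types.OutputFormatConfiguration{
        Serializer: &types.Serializer{
            ParquetSerDe: &types.ParquetSerDe{},
        },
    },
    SchemaConfiguration: &types.SchemaConfiguration{
        DatabaseName: aws.String("my_glue_db"),    // placeholder
        TableName:    aws.String("my_glue_table"), // placeholder
        RoleARN:      aws.String("arn:aws:iam::111122223333:role/firehose-glue-access"), // placeholder
    },
}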
For information about symmetric and asymmetric CMKs, see About - // Symmetric and Asymmetric CMKs (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html) + // KeyType set to CUSTOMER_MANAGED_CMK, Firehose invokes the Amazon KMS operation + // CreateGrant (https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateGrant.html) + // to create a grant that allows the Firehose service to use the customer managed + // CMK to perform encryption and decryption. Firehose manages that grant. When you + // invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream + // that is encrypted with a customer managed CMK, Firehose schedules the grant it + // had on the old CMK for retirement. You can use a CMK of type + // CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a + // CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this + // limit, Firehose throws a LimitExceededException . To encrypt your delivery + // stream, use symmetric CMKs. Firehose doesn't support asymmetric CMKs. For + // information about symmetric and asymmetric CMKs, see About Symmetric and + // Asymmetric CMKs (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html) // in the Amazon Web Services Key Management Service developer guide. // // This member is required. @@ -635,29 +628,28 @@ type DeliveryStreamEncryptionConfigurationInput struct { // If you set KeyType to CUSTOMER_MANAGED_CMK , you must specify the Amazon // Resource Name (ARN) of the CMK. If you set KeyType to Amazon Web - // Services_OWNED_CMK , Kinesis Data Firehose uses a service-account CMK. + // Services_OWNED_CMK , Firehose uses a service-account CMK. KeyARN *string noSmithyDocumentSerde } -// The deserializer you want Kinesis Data Firehose to use for converting the input -// data from JSON. Kinesis Data Firehose then serializes the data to its final -// format using the Serializer . Kinesis Data Firehose supports two types of -// deserializers: the Apache Hive JSON SerDe (https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-JSON) +// The deserializer you want Firehose to use for converting the input data from +// JSON. Firehose then serializes the data to its final format using the Serializer +// . Firehose supports two types of deserializers: the Apache Hive JSON SerDe (https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-JSON) // and the OpenX JSON SerDe (https://github.com/rcongiu/Hive-JSON-Serde) . type Deserializer struct { - // The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for - // deserializing data, which means converting it from the JSON format in - // preparation for serializing it to the Parquet or ORC format. This is one of two - // deserializers you can choose, depending on which one offers the functionality - // you need. The other option is the OpenX SerDe. + // The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, + // which means converting it from the JSON format in preparation for serializing it + // to the Parquet or ORC format. This is one of two deserializers you can choose, + // depending on which one offers the functionality you need. The other option is + // the OpenX SerDe. HiveJsonSerDe *HiveJsonSerDe - // The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which - // means converting it from the JSON format in preparation for serializing it to - // the Parquet or ORC format. 
This is one of two deserializers you can choose, + // The OpenX SerDe. Used by Firehose for deserializing data, which means + // converting it from the JSON format in preparation for serializing it to the + // Parquet or ORC format. This is one of two deserializers you can choose, // depending on which one offers the functionality you need. The other option is // the native Hive / HCatalog JsonSerDe. OpenXJsonSerDe *OpenXJsonSerDe @@ -704,22 +696,20 @@ type DestinationDescription struct { } // Indicates the method for setting up document ID. The supported methods are -// Kinesis Data Firehose generated document ID and OpenSearch Service generated -// document ID. +// Firehose generated document ID and OpenSearch Service generated document ID. type DocumentIdOptions struct { - // When the FIREHOSE_DEFAULT option is chosen, Kinesis Data Firehose generates a - // unique document ID for each record based on a unique internal identifier. The - // generated document ID is stable across multiple delivery attempts, which helps - // prevent the same record from being indexed multiple times with different - // document IDs. When the NO_DOCUMENT_ID option is chosen, Kinesis Data Firehose - // does not include any document IDs in the requests it sends to the Amazon - // OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate - // document IDs. In case of multiple delivery attempts, this may cause the same - // record to be indexed more than once with different document IDs. This option - // enables write-heavy operations, such as the ingestion of logs and observability - // data, to consume less resources in the Amazon OpenSearch Service domain, - // resulting in improved performance. + // When the FIREHOSE_DEFAULT option is chosen, Firehose generates a unique + // document ID for each record based on a unique internal identifier. The generated + // document ID is stable across multiple delivery attempts, which helps prevent the + // same record from being indexed multiple times with different document IDs. When + // the NO_DOCUMENT_ID option is chosen, Firehose does not include any document IDs + // in the requests it sends to the Amazon OpenSearch Service. This causes the + // Amazon OpenSearch Service domain to generate document IDs. In case of multiple + // delivery attempts, this may cause the same record to be indexed more than once + // with different document IDs. This option enables write-heavy operations, such as + // the ingestion of logs and observability data, to consume less resources in the + // Amazon OpenSearch Service domain, resulting in improved performance. // // This member is required. DefaultDocumentIdFormat DefaultDocumentIdFormat @@ -732,12 +722,12 @@ type DocumentIdOptions struct { // Currently, dynamic partitioning is only supported for Amazon S3 destinations. type DynamicPartitioningConfiguration struct { - // Specifies that the dynamic partitioning is enabled for this Kinesis Data - // Firehose delivery stream. + // Specifies that the dynamic partitioning is enabled for this Firehose delivery + // stream. Enabled *bool - // The retry behavior in case Kinesis Data Firehose is unable to deliver data to - // an Amazon S3 prefix. + // The retry behavior in case Firehose is unable to deliver data to an Amazon S3 + // prefix. RetryOptions *RetryOptions noSmithyDocumentSerde @@ -769,10 +759,9 @@ type ElasticsearchDestinationConfiguration struct { // This member is required. 
IndexName *string - // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data - // Firehose for calling the Amazon ES Configuration API and for indexing documents. - // For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 - // Destination (https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) + // The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for + // calling the Amazon ES Configuration API and for indexing documents. For more + // information, see Grant Firehose Access to an Amazon S3 Destination (https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) // and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // . // @@ -796,8 +785,7 @@ type ElasticsearchDestinationConfiguration struct { ClusterEndpoint *string // Indicates the method for setting up document ID. The supported methods are - // Kinesis Data Firehose generated document ID and OpenSearch Service generated - // document ID. + // Firehose generated document ID and OpenSearch Service generated document ID. DocumentIdOptions *DocumentIdOptions // The ARN of the Amazon ES domain. The IAM role must have permissions for @@ -816,26 +804,25 @@ type ElasticsearchDestinationConfiguration struct { // The data processing configuration. ProcessingConfiguration *ProcessingConfiguration - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to Amazon ES. The default value is 300 (5 minutes). + // The retry behavior in case Firehose is unable to deliver documents to Amazon + // ES. The default value is 300 (5 minutes). RetryOptions *ElasticsearchRetryOptions // Defines how documents should be delivered to Amazon S3. When it is set to - // FailedDocumentsOnly , Kinesis Data Firehose writes any documents that could not - // be indexed to the configured Amazon S3 destination, with - // AmazonOpenSearchService-failed/ appended to the key prefix. When set to - // AllDocuments , Kinesis Data Firehose delivers all incoming records to Amazon S3, - // and also writes failed documents with AmazonOpenSearchService-failed/ appended - // to the prefix. For more information, see Amazon S3 Backup for the Amazon ES - // Destination (https://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-s3-backup) + // FailedDocumentsOnly , Firehose writes any documents that could not be indexed to + // the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ + // appended to the key prefix. When set to AllDocuments , Firehose delivers all + // incoming records to Amazon S3, and also writes failed documents with + // AmazonOpenSearchService-failed/ appended to the prefix. For more information, + // see Amazon S3 Backup for the Amazon ES Destination (https://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-s3-backup) // . Default value is FailedDocumentsOnly . You can't change this backup mode after // you create the delivery stream. S3BackupMode ElasticsearchS3BackupMode // The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type // per index. If you try to specify a new type for an existing index that already - // has another type, Kinesis Data Firehose returns an error during run time. For - // Elasticsearch 7.x, don't specify a TypeName . + // has another type, Firehose returns an error during run time. 
For Elasticsearch + // 7.x, don't specify a TypeName . TypeName *string // The details of the VPC of the Amazon destination. @@ -853,20 +840,17 @@ type ElasticsearchDestinationDescription struct { // The Amazon CloudWatch logging options. CloudWatchLoggingOptions *CloudWatchLoggingOptions - // The endpoint to use when communicating with the cluster. Kinesis Data Firehose - // uses either this ClusterEndpoint or the DomainARN field to send data to Amazon - // ES. + // The endpoint to use when communicating with the cluster. Firehose uses either + // this ClusterEndpoint or the DomainARN field to send data to Amazon ES. ClusterEndpoint *string // Indicates the method for setting up document ID. The supported methods are - // Kinesis Data Firehose generated document ID and OpenSearch Service generated - // document ID. + // Firehose generated document ID and OpenSearch Service generated document ID. DocumentIdOptions *DocumentIdOptions // The ARN of the Amazon ES domain. For more information, see Amazon Resource // Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // . Kinesis Data Firehose uses either ClusterEndpoint or DomainARN to send data - // to Amazon ES. + // . Firehose uses either ClusterEndpoint or DomainARN to send data to Amazon ES. DomainARN *string // The Elasticsearch index name. @@ -920,8 +904,7 @@ type ElasticsearchDestinationUpdate struct { ClusterEndpoint *string // Indicates the method for setting up document ID. The supported methods are - // Kinesis Data Firehose generated document ID and OpenSearch Service generated - // document ID. + // Firehose generated document ID and OpenSearch Service generated document ID. DocumentIdOptions *DocumentIdOptions // The ARN of the Amazon ES domain. The IAM role must have permissions for @@ -943,14 +926,13 @@ type ElasticsearchDestinationUpdate struct { // The data processing configuration. ProcessingConfiguration *ProcessingConfiguration - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to Amazon ES. The default value is 300 (5 minutes). + // The retry behavior in case Firehose is unable to deliver documents to Amazon + // ES. The default value is 300 (5 minutes). RetryOptions *ElasticsearchRetryOptions - // The Amazon Resource Name (ARN) of the IAM role to be assumed by Kinesis Data - // Firehose for calling the Amazon ES Configuration API and for indexing documents. - // For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 - // Destination (https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) + // The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for + // calling the Amazon ES Configuration API and for indexing documents. For more + // information, see Grant Firehose Access to an Amazon S3 Destination (https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) // and Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // . RoleARN *string @@ -960,25 +942,24 @@ type ElasticsearchDestinationUpdate struct { // The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type // per index. If you try to specify a new type for an existing index that already - // has another type, Kinesis Data Firehose returns an error during runtime. 
If you - // upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, - // Kinesis Data Firehose still delivers data to Elasticsearch with the old index - // name and type name. If you want to update your delivery stream with a new index - // name, provide an empty string for TypeName . + // has another type, Firehose returns an error during runtime. If you upgrade + // Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Firehose + // still delivers data to Elasticsearch with the old index name and type name. If + // you want to update your delivery stream with a new index name, provide an empty + // string for TypeName . TypeName *string noSmithyDocumentSerde } -// Configures retry behavior in case Kinesis Data Firehose is unable to deliver -// documents to Amazon ES. +// Configures retry behavior in case Firehose is unable to deliver documents to +// Amazon ES. type ElasticsearchRetryOptions struct { // After an initial failure to deliver to Amazon ES, the total amount of time - // during which Kinesis Data Firehose retries delivery (including the first - // attempt). After this time has elapsed, the failed documents are written to - // Amazon S3. Default value is 300 seconds (5 minutes). A value of 0 (zero) results - // in no retries. + // during which Firehose retries delivery (including the first attempt). After this + // time has elapsed, the failed documents are written to Amazon S3. Default value + // is 300 seconds (5 minutes). A value of 0 (zero) results in no retries. DurationInSeconds *int32 noSmithyDocumentSerde @@ -1024,6 +1005,9 @@ type ExtendedS3DestinationConfiguration struct { // The compression format. If no value is specified, the default is UNCOMPRESSED. CompressionFormat CompressionFormat + // The time zone you prefer. UTC is the default. + CustomTimeZone *string + // The serializer, deserializer, and schema for converting data from the JSON // format to the Parquet or ORC format before writing it to Amazon S3. DataFormatConversionConfiguration *DataFormatConversionConfiguration @@ -1037,13 +1021,15 @@ type ExtendedS3DestinationConfiguration struct { // encryption. EncryptionConfiguration *EncryptionConfiguration - // A prefix that Kinesis Data Firehose evaluates and adds to failed records before - // writing them to S3. This prefix appears immediately following the bucket name. - // For information about how to specify this prefix, see Custom Prefixes for - // Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) - // . + // A prefix that Firehose evaluates and adds to failed records before writing them + // to S3. This prefix appears immediately following the bucket name. For + // information about how to specify this prefix, see Custom Prefixes for Amazon S3 + // Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) . ErrorOutputPrefix *string + // Specify a file extension. It will override the default file extension + FileExtension *string + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered // Amazon S3 files. You can also specify a custom prefix, as described in Custom // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) @@ -1101,6 +1087,9 @@ type ExtendedS3DestinationDescription struct { // The Amazon CloudWatch logging options for your delivery stream. CloudWatchLoggingOptions *CloudWatchLoggingOptions + // The time zone you prefer. UTC is the default. 
+ CustomTimeZone *string + // The serializer, deserializer, and schema for converting data from the JSON // format to the Parquet or ORC format before writing it to Amazon S3. DataFormatConversionConfiguration *DataFormatConversionConfiguration @@ -1110,13 +1099,15 @@ type ExtendedS3DestinationDescription struct { // Currently, dynamic partitioning is only supported for Amazon S3 destinations. DynamicPartitioningConfiguration *DynamicPartitioningConfiguration - // A prefix that Kinesis Data Firehose evaluates and adds to failed records before - // writing them to S3. This prefix appears immediately following the bucket name. - // For information about how to specify this prefix, see Custom Prefixes for - // Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) - // . + // A prefix that Firehose evaluates and adds to failed records before writing them + // to S3. This prefix appears immediately following the bucket name. For + // information about how to specify this prefix, see Custom Prefixes for Amazon S3 + // Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) . ErrorOutputPrefix *string + // Specify a file extension. It will override the default file extension + FileExtension *string + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered // Amazon S3 files. You can also specify a custom prefix, as described in Custom // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) @@ -1152,6 +1143,9 @@ type ExtendedS3DestinationUpdate struct { // The compression format. If no value is specified, the default is UNCOMPRESSED . CompressionFormat CompressionFormat + // The time zone you prefer. UTC is the default. + CustomTimeZone *string + // The serializer, deserializer, and schema for converting data from the JSON // format to the Parquet or ORC format before writing it to Amazon S3. DataFormatConversionConfiguration *DataFormatConversionConfiguration @@ -1165,13 +1159,15 @@ type ExtendedS3DestinationUpdate struct { // encryption. EncryptionConfiguration *EncryptionConfiguration - // A prefix that Kinesis Data Firehose evaluates and adds to failed records before - // writing them to S3. This prefix appears immediately following the bucket name. - // For information about how to specify this prefix, see Custom Prefixes for - // Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) - // . + // A prefix that Firehose evaluates and adds to failed records before writing them + // to S3. This prefix appears immediately following the bucket name. For + // information about how to specify this prefix, see Custom Prefixes for Amazon S3 + // Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) . ErrorOutputPrefix *string + // Specify a file extension. It will override the default file extension + FileExtension *string + // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered // Amazon S3 files. You can also specify a custom prefix, as described in Custom // Prefixes for Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) @@ -1215,19 +1211,19 @@ type FailureDescription struct { noSmithyDocumentSerde } -// The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for -// deserializing data, which means converting it from the JSON format in -// preparation for serializing it to the Parquet or ORC format. 
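The two fields added in this hunk, CustomTimeZone and FileExtension, are plain optional strings on the extended S3 destination types. A hedged sketch of setting them at stream creation time follows; the stream name, ARNs, time zone, and extension values are all placeholders.

package example

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/firehose"
    "github.com/aws/aws-sdk-go-v2/service/firehose/types"
)

// Illustrative CreateDeliveryStream input that sets the new fields.
var createInput = firehose.CreateDeliveryStreamInput{
    DeliveryStreamName: aws.String("my-delivery-stream"), // placeholder
    DeliveryStreamType: types.DeliveryStreamTypeDirectPut,
    ExtendedS3DestinationConfiguration: &types.ExtendedS3DestinationConfiguration{
        BucketARN:      aws.String("arn:aws:s3:::my-bucket"),                  // placeholder
        RoleARN:        aws.String("arn:aws:iam::111122223333:role/firehose"), // placeholder
        CustomTimeZone: aws.String("Asia/Tokyo"),                              // overrides the default UTC
        FileExtension:  aws.String(".json"),                                   // overrides the default file extension
    },
}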
This is one of two -// deserializers you can choose, depending on which one offers the functionality -// you need. The other option is the OpenX SerDe. +// The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, +// which means converting it from the JSON format in preparation for serializing it +// to the Parquet or ORC format. This is one of two deserializers you can choose, +// depending on which one offers the functionality you need. The other option is +// the OpenX SerDe. type HiveJsonSerDe struct { - // Indicates how you want Kinesis Data Firehose to parse the date and timestamps - // that may be present in your input data JSON. To specify these format strings, - // follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more + // Indicates how you want Firehose to parse the date and timestamps that may be + // present in your input data JSON. To specify these format strings, follow the + // pattern syntax of JodaTime's DateTimeFormat format strings. For more // information, see Class DateTimeFormat (https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html) // . You can also use the special value millis to parse timestamps in epoch - // milliseconds. If you don't specify a format, Kinesis Data Firehose uses + // milliseconds. If you don't specify a format, Firehose uses // java.sql.Timestamp::valueOf by default. TimestampFormats []string @@ -1235,10 +1231,10 @@ type HiveJsonSerDe struct { } // Describes the buffering options that can be applied before data is delivered to -// the HTTP endpoint destination. Kinesis Data Firehose treats these options as -// hints, and it might choose to use more optimal values. The SizeInMBs and -// IntervalInSeconds parameters are optional. However, if specify a value for one -// of them, you must also provide a value for the other. +// the HTTP endpoint destination. Firehose treats these options as hints, and it +// might choose to use more optimal values. The SizeInMBs and IntervalInSeconds +// parameters are optional. However, if you specify a value for one of them, you must +// also provide a value for the other. type HttpEndpointBufferingHints struct { // Buffer incoming data for the specified period of time, in seconds, before @@ -1320,8 +1316,8 @@ type HttpEndpointDestinationConfiguration struct { S3Configuration *S3DestinationConfiguration // The buffering options that can be used before data is delivered to the - // specified destination. Kinesis Data Firehose treats these options as hints, and - // it might choose to use more optimal values. The SizeInMBs and IntervalInSeconds + // specified destination. Firehose treats these options as hints, and it might + // choose to use more optimal values. The SizeInMBs and IntervalInSeconds // parameters are optional. However, if you specify a value for one of them, you // must also provide a value for the other. BufferingHints *HttpEndpointBufferingHints @@ -1336,19 +1332,19 @@ type HttpEndpointDestinationConfiguration struct { // destination. RequestConfiguration *HttpEndpointRequestConfiguration - // Describes the retry behavior in case Kinesis Data Firehose is unable to deliver - // data to the specified HTTP endpoint destination, or if it doesn't receive a - // valid acknowledgment of receipt from the specified HTTP endpoint destination.
+ // Describes the retry behavior in case Firehose is unable to deliver data to the + // specified HTTP endpoint destination, or if it doesn't receive a valid + // acknowledgment of receipt from the specified HTTP endpoint destination. RetryOptions *HttpEndpointRetryOptions - // Kinesis Data Firehose uses this IAM role for all the permissions that the - // delivery stream needs. + // Firehose uses this IAM role for all the permissions that the delivery stream + // needs. RoleARN *string - // Describes the S3 bucket backup options for the data that Kinesis Data Firehose - // delivers to the HTTP endpoint destination. You can back up all documents ( - // AllData ) or only the documents that Kinesis Data Firehose could not deliver to - // the specified HTTP endpoint destination ( FailedDataOnly ). + // Describes the S3 bucket backup options for the data that Firehose delivers to + // the HTTP endpoint destination. You can back up all documents ( AllData ) or only + // the documents that Firehose could not deliver to the specified HTTP endpoint + // destination ( FailedDataOnly ). S3BackupMode HttpEndpointS3BackupMode noSmithyDocumentSerde @@ -1358,10 +1354,10 @@ type HttpEndpointDestinationConfiguration struct { type HttpEndpointDestinationDescription struct { // Describes buffering options that can be applied to the data before it is - // delivered to the HTTPS endpoint destination. Kinesis Data Firehose teats these - // options as hints, and it might choose to use more optimal values. The SizeInMBs - // and IntervalInSeconds parameters are optional. However, if specify a value for - // one of them, you must also provide a value for the other. + // delivered to the HTTPS endpoint destination. Firehose teats these options as + // hints, and it might choose to use more optimal values. The SizeInMBs and + // IntervalInSeconds parameters are optional. However, if specify a value for one + // of them, you must also provide a value for the other. BufferingHints *HttpEndpointBufferingHints // Describes the Amazon CloudWatch logging options for your delivery stream. @@ -1377,19 +1373,19 @@ type HttpEndpointDestinationDescription struct { // destination. RequestConfiguration *HttpEndpointRequestConfiguration - // Describes the retry behavior in case Kinesis Data Firehose is unable to deliver - // data to the specified HTTP endpoint destination, or if it doesn't receive a - // valid acknowledgment of receipt from the specified HTTP endpoint destination. + // Describes the retry behavior in case Firehose is unable to deliver data to the + // specified HTTP endpoint destination, or if it doesn't receive a valid + // acknowledgment of receipt from the specified HTTP endpoint destination. RetryOptions *HttpEndpointRetryOptions - // Kinesis Data Firehose uses this IAM role for all the permissions that the - // delivery stream needs. + // Firehose uses this IAM role for all the permissions that the delivery stream + // needs. RoleARN *string // Describes the S3 bucket backup options for the data that Kinesis Firehose // delivers to the HTTP endpoint destination. You can back up all documents ( - // AllData ) or only the documents that Kinesis Data Firehose could not deliver to - // the specified HTTP endpoint destination ( FailedDataOnly ). + // AllData ) or only the documents that Firehose could not deliver to the specified + // HTTP endpoint destination ( FailedDataOnly ). S3BackupMode HttpEndpointS3BackupMode // Describes a destination in Amazon S3. 
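As a side note on the HTTP endpoint hunks above: the buffering-hints documentation says SizeInMBs and IntervalInSeconds are optional but must be supplied together. The following is a minimal sketch, not part of this patch, of how a caller might populate those settings with the v2 SDK; the helper name, endpoint URL, bucket and role ARNs, and chosen values are illustrative placeholders, and the field names are taken from the generated types as shown in this diff.

package firehoseexamples

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/firehose/types"
)

// buildHTTPEndpointDestination sketches an HTTP endpoint destination in which
// both buffering hints are set together, as the documentation requires.
func buildHTTPEndpointDestination() *types.HttpEndpointDestinationConfiguration {
	return &types.HttpEndpointDestinationConfiguration{
		EndpointConfiguration: &types.HttpEndpointConfiguration{
			Url: aws.String("https://example.com/ingest"), // placeholder endpoint
		},
		S3Configuration: &types.S3DestinationConfiguration{
			BucketARN: aws.String("arn:aws:s3:::example-backup-bucket"),          // placeholder
			RoleARN:   aws.String("arn:aws:iam::111122223333:role/firehose-role"), // placeholder
		},
		BufferingHints: &types.HttpEndpointBufferingHints{
			IntervalInSeconds: aws.Int32(60), // if one hint is set, the other must be too
			SizeInMBs:         aws.Int32(4),
		},
		RetryOptions: &types.HttpEndpointRetryOptions{
			DurationInSeconds: aws.Int32(300), // total retry time, excluding acknowledgment waits
		},
		RoleARN:      aws.String("arn:aws:iam::111122223333:role/firehose-role"),
		S3BackupMode: types.HttpEndpointS3BackupMode("FailedDataOnly"), // back up only undeliverable records
	}
}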
@@ -1402,10 +1398,10 @@ type HttpEndpointDestinationDescription struct { type HttpEndpointDestinationUpdate struct { // Describes buffering options that can be applied to the data before it is - // delivered to the HTTPS endpoint destination. Kinesis Data Firehose teats these - // options as hints, and it might choose to use more optimal values. The SizeInMBs - // and IntervalInSeconds parameters are optional. However, if specify a value for - // one of them, you must also provide a value for the other. + // delivered to the HTTPS endpoint destination. Firehose teats these options as + // hints, and it might choose to use more optimal values. The SizeInMBs and + // IntervalInSeconds parameters are optional. However, if specify a value for one + // of them, you must also provide a value for the other. BufferingHints *HttpEndpointBufferingHints // Describes the Amazon CloudWatch logging options for your delivery stream. @@ -1421,19 +1417,19 @@ type HttpEndpointDestinationUpdate struct { // destination. RequestConfiguration *HttpEndpointRequestConfiguration - // Describes the retry behavior in case Kinesis Data Firehose is unable to deliver - // data to the specified HTTP endpoint destination, or if it doesn't receive a - // valid acknowledgment of receipt from the specified HTTP endpoint destination. + // Describes the retry behavior in case Firehose is unable to deliver data to the + // specified HTTP endpoint destination, or if it doesn't receive a valid + // acknowledgment of receipt from the specified HTTP endpoint destination. RetryOptions *HttpEndpointRetryOptions - // Kinesis Data Firehose uses this IAM role for all the permissions that the - // delivery stream needs. + // Firehose uses this IAM role for all the permissions that the delivery stream + // needs. RoleARN *string // Describes the S3 bucket backup options for the data that Kinesis Firehose // delivers to the HTTP endpoint destination. You can back up all documents ( - // AllData ) or only the documents that Kinesis Data Firehose could not deliver to - // the specified HTTP endpoint destination ( FailedDataOnly ). + // AllData ) or only the documents that Firehose could not deliver to the specified + // HTTP endpoint destination ( FailedDataOnly ). S3BackupMode HttpEndpointS3BackupMode // Describes an update for a destination in Amazon S3. @@ -1448,8 +1444,8 @@ type HttpEndpointRequestConfiguration struct { // Describes the metadata sent to the HTTP endpoint destination. CommonAttributes []HttpEndpointCommonAttribute - // Kinesis Data Firehose uses the content encoding to compress the body of a - // request before sending the request to the destination. For more information, see + // Firehose uses the content encoding to compress the body of a request before + // sending the request to the destination. For more information, see // Content-Encoding (https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding) // in MDN Web Docs, the official Mozilla documentation. ContentEncoding ContentEncoding @@ -1457,16 +1453,15 @@ type HttpEndpointRequestConfiguration struct { noSmithyDocumentSerde } -// Describes the retry behavior in case Kinesis Data Firehose is unable to deliver -// data to the specified HTTP endpoint destination, or if it doesn't receive a -// valid acknowledgment of receipt from the specified HTTP endpoint destination. 
+// Describes the retry behavior in case Firehose is unable to deliver data to the +// specified HTTP endpoint destination, or if it doesn't receive a valid +// acknowledgment of receipt from the specified HTTP endpoint destination. type HttpEndpointRetryOptions struct { - // The total amount of time that Kinesis Data Firehose spends on retries. This - // duration starts after the initial attempt to send data to the custom destination - // via HTTPS endpoint fails. It doesn't include the periods during which Kinesis - // Data Firehose waits for acknowledgment from the specified destination after each - // attempt. + // The total amount of time that Firehose spends on retries. This duration starts + // after the initial attempt to send data to the custom destination via HTTPS + // endpoint fails. It doesn't include the periods during which Firehose waits for + // acknowledgment from the specified destination after each attempt. DurationInSeconds *int32 noSmithyDocumentSerde @@ -1506,12 +1501,12 @@ type KinesisStreamSourceConfiguration struct { noSmithyDocumentSerde } -// Details about a Kinesis data stream used as the source for a Kinesis Data -// Firehose delivery stream. +// Details about a Kinesis data stream used as the source for a Firehose delivery +// stream. type KinesisStreamSourceDescription struct { - // Kinesis Data Firehose starts retrieving records from the Kinesis data stream - // starting with this timestamp. + // Firehose starts retrieving records from the Kinesis data stream starting with + // this timestamp. DeliveryStartTimestamp *time.Time // The Amazon Resource Name (ARN) of the source Kinesis data stream. For more @@ -1565,15 +1560,15 @@ type MSKSourceConfiguration struct { noSmithyDocumentSerde } -// Details about the Amazon MSK cluster used as the source for a Kinesis Data -// Firehose delivery stream. +// Details about the Amazon MSK cluster used as the source for a Firehose delivery +// stream. type MSKSourceDescription struct { // The authentication configuration of the Amazon MSK cluster. AuthenticationConfiguration *AuthenticationConfiguration - // Kinesis Data Firehose starts retrieving records from the topic within the - // Amazon MSK cluster starting with this timestamp. + // Firehose starts retrieving records from the topic within the Amazon MSK cluster + // starting with this timestamp. DeliveryStartTimestamp *time.Time // The ARN of the Amazon MSK cluster. @@ -1585,15 +1580,15 @@ type MSKSourceDescription struct { noSmithyDocumentSerde } -// The OpenX SerDe. Used by Kinesis Data Firehose for deserializing data, which -// means converting it from the JSON format in preparation for serializing it to -// the Parquet or ORC format. This is one of two deserializers you can choose, +// The OpenX SerDe. Used by Firehose for deserializing data, which means +// converting it from the JSON format in preparation for serializing it to the +// Parquet or ORC format. This is one of two deserializers you can choose, // depending on which one offers the functionality you need. The other option is // the native Hive / HCatalog JsonSerDe. type OpenXJsonSerDe struct { - // When set to true , which is the default, Kinesis Data Firehose converts JSON - // keys to lowercase before deserializing them. + // When set to true , which is the default, Firehose converts JSON keys to + // lowercase before deserializing them. CaseInsensitive *bool // Maps column names to JSON keys that aren't identical to the column names. 
This @@ -1603,10 +1598,10 @@ type OpenXJsonSerDe struct { ColumnToJsonKeyMappings map[string]string // When set to true , specifies that the names of the keys include dots and that - // you want Kinesis Data Firehose to replace them with underscores. This is useful - // because Apache Hive does not allow dots in column names. For example, if the - // JSON contains a key whose name is "a.b", you can define the column name to be - // "a_b" when using this option. The default is false . + // you want Firehose to replace them with underscores. This is useful because + // Apache Hive does not allow dots in column names. For example, if the JSON + // contains a key whose name is "a.b", you can define the column name to be "a_b" + // when using this option. The default is false . ConvertDotsInJsonKeysToUnderscores *bool noSmithyDocumentSerde @@ -1618,12 +1613,12 @@ type OrcSerDe struct { // The Hadoop Distributed File System (HDFS) block size. This is useful if you // intend to copy the data from Amazon S3 to HDFS before querying. The default is - // 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for - // padding calculations. + // 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding + // calculations. BlockSizeBytes *int32 - // The column names for which you want Kinesis Data Firehose to create bloom - // filters. The default is null . + // The column names for which you want Firehose to create bloom filters. The + // default is null . BloomFilterColumns []string // The Bloom filter false positive probability (FPP). The lower the FPP, the @@ -1656,8 +1651,8 @@ type OrcSerDe struct { // of 3.2 MiB for padding within the 256 MiB block. In such a case, if the // available size within the block is more than 3.2 MiB, a new, smaller stripe is // inserted to fit within that space. This ensures that no stripe crosses block - // boundaries and causes remote reads within a node-local task. Kinesis Data - // Firehose ignores this parameter when OrcSerDe$EnablePadding is false . + // boundaries and causes remote reads within a node-local task. Firehose ignores + // this parameter when OrcSerDe$EnablePadding is false . PaddingTolerance *float64 // The number of rows between index entries. The default is 10,000 and the minimum @@ -1671,9 +1666,9 @@ type OrcSerDe struct { noSmithyDocumentSerde } -// Specifies the serializer that you want Kinesis Data Firehose to use to convert -// the format of your data before it writes it to Amazon S3. This parameter is -// required if Enabled is set to true. +// Specifies the serializer that you want Firehose to use to convert the format of +// your data before it writes it to Amazon S3. This parameter is required if +// Enabled is set to true. type OutputFormatConfiguration struct { // Specifies which serializer to use. You can choose either the ORC SerDe or the @@ -1690,8 +1685,8 @@ type ParquetSerDe struct { // The Hadoop Distributed File System (HDFS) block size. This is useful if you // intend to copy the data from Amazon S3 to HDFS before querying. The default is - // 256 MiB and the minimum is 64 MiB. Kinesis Data Firehose uses this value for - // padding calculations. + // 256 MiB and the minimum is 64 MiB. Firehose uses this value for padding + // calculations. BlockSizeBytes *int32 // The compression code to use over data blocks. The possible values are @@ -1731,7 +1726,10 @@ type ProcessingConfiguration struct { noSmithyDocumentSerde } -// Describes a data processor. +// Describes a data processor. 
If you want to add a new line delimiter between +// records in objects that are delivered to Amazon S3, choose +// AppendDelimiterToRecord as a processor type. You don’t have to put a processor +// parameter when you select AppendDelimiterToRecord . type Processor struct { // The type of processor. @@ -1842,8 +1840,8 @@ type RedshiftDestinationConfiguration struct { // The data processing configuration. ProcessingConfiguration *ProcessingConfiguration - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to Amazon Redshift. Default value is 3600 (60 minutes). + // The retry behavior in case Firehose is unable to deliver documents to Amazon + // Redshift. Default value is 3600 (60 minutes). RetryOptions *RedshiftRetryOptions // The configuration for backup in Amazon S3. @@ -1894,8 +1892,8 @@ type RedshiftDestinationDescription struct { // The data processing configuration. ProcessingConfiguration *ProcessingConfiguration - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to Amazon Redshift. Default value is 3600 (60 minutes). + // The retry behavior in case Firehose is unable to deliver documents to Amazon + // Redshift. Default value is 3600 (60 minutes). RetryOptions *RedshiftRetryOptions // The configuration for backup in Amazon S3. @@ -1925,8 +1923,8 @@ type RedshiftDestinationUpdate struct { // The data processing configuration. ProcessingConfiguration *ProcessingConfiguration - // The retry behavior in case Kinesis Data Firehose is unable to deliver documents - // to Amazon Redshift. Default value is 3600 (60 minutes). + // The retry behavior in case Firehose is unable to deliver documents to Amazon + // Redshift. Default value is 3600 (60 minutes). RetryOptions *RedshiftRetryOptions // The Amazon Resource Name (ARN) of the Amazon Web Services credentials. For more @@ -1954,26 +1952,26 @@ type RedshiftDestinationUpdate struct { noSmithyDocumentSerde } -// Configures retry behavior in case Kinesis Data Firehose is unable to deliver -// documents to Amazon Redshift. +// Configures retry behavior in case Firehose is unable to deliver documents to +// Amazon Redshift. type RedshiftRetryOptions struct { - // The length of time during which Kinesis Data Firehose retries delivery after a - // failure, starting from the initial request and including the first attempt. The - // default value is 3600 seconds (60 minutes). Kinesis Data Firehose does not retry - // if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt - // takes longer than the current value. + // The length of time during which Firehose retries delivery after a failure, + // starting from the initial request and including the first attempt. The default + // value is 3600 seconds (60 minutes). Firehose does not retry if the value of + // DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than + // the current value. DurationInSeconds *int32 noSmithyDocumentSerde } -// The retry behavior in case Kinesis Data Firehose is unable to deliver data to -// an Amazon S3 prefix. +// The retry behavior in case Firehose is unable to deliver data to an Amazon S3 +// prefix. type RetryOptions struct { - // The period of time during which Kinesis Data Firehose retries to deliver data - // to the specified Amazon S3 prefix. + // The period of time during which Firehose retries to deliver data to the + // specified Amazon S3 prefix. 
DurationInSeconds *int32 noSmithyDocumentSerde @@ -2014,11 +2012,10 @@ type S3DestinationConfiguration struct { // encryption. EncryptionConfiguration *EncryptionConfiguration - // A prefix that Kinesis Data Firehose evaluates and adds to failed records before - // writing them to S3. This prefix appears immediately following the bucket name. - // For information about how to specify this prefix, see Custom Prefixes for - // Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) - // . + // A prefix that Firehose evaluates and adds to failed records before writing them + // to S3. This prefix appears immediately following the bucket name. For + // information about how to specify this prefix, see Custom Prefixes for Amazon S3 + // Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) . ErrorOutputPrefix *string // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered @@ -2068,11 +2065,10 @@ type S3DestinationDescription struct { // The Amazon CloudWatch logging options for your delivery stream. CloudWatchLoggingOptions *CloudWatchLoggingOptions - // A prefix that Kinesis Data Firehose evaluates and adds to failed records before - // writing them to S3. This prefix appears immediately following the bucket name. - // For information about how to specify this prefix, see Custom Prefixes for - // Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) - // . + // A prefix that Firehose evaluates and adds to failed records before writing them + // to S3. This prefix appears immediately following the bucket name. For + // information about how to specify this prefix, see Custom Prefixes for Amazon S3 + // Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) . ErrorOutputPrefix *string // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered @@ -2109,11 +2105,10 @@ type S3DestinationUpdate struct { // encryption. EncryptionConfiguration *EncryptionConfiguration - // A prefix that Kinesis Data Firehose evaluates and adds to failed records before - // writing them to S3. This prefix appears immediately following the bucket name. - // For information about how to specify this prefix, see Custom Prefixes for - // Amazon S3 Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) - // . + // A prefix that Firehose evaluates and adds to failed records before writing them + // to S3. This prefix appears immediately following the bucket name. For + // information about how to specify this prefix, see Custom Prefixes for Amazon S3 + // Objects (https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) . ErrorOutputPrefix *string // The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered @@ -2131,9 +2126,8 @@ type S3DestinationUpdate struct { noSmithyDocumentSerde } -// Specifies the schema to which you want Kinesis Data Firehose to configure your -// data before it writes it to Amazon S3. This parameter is required if Enabled is -// set to true. +// Specifies the schema to which you want Firehose to configure your data before +// it writes it to Amazon S3. This parameter is required if Enabled is set to true. type SchemaConfiguration struct { // The ID of the Amazon Web Services Glue Data Catalog. If you don't supply this, @@ -2150,11 +2144,11 @@ type SchemaConfiguration struct { // Region. Region *string - // The role that Kinesis Data Firehose can use to access Amazon Web Services Glue. 
- // This role must be in the same account you use for Kinesis Data Firehose. - // Cross-account roles aren't allowed. If the SchemaConfiguration request - // parameter is used as part of invoking the CreateDeliveryStream API, then the - // RoleARN property is required and its value must be specified. + // The role that Firehose can use to access Amazon Web Services Glue. This role + // must be in the same account you use for Firehose. Cross-account roles aren't + // allowed. If the SchemaConfiguration request parameter is used as part of + // invoking the CreateDeliveryStream API, then the RoleARN property is required + // and its value must be specified. RoleARN *string // Specifies the Amazon Web Services Glue table that contains the column @@ -2164,17 +2158,16 @@ type SchemaConfiguration struct { TableName *string // Specifies the table version for the output data schema. If you don't specify - // this version ID, or if you set it to LATEST , Kinesis Data Firehose uses the - // most recent version. This means that any updates to the table are automatically - // picked up. + // this version ID, or if you set it to LATEST , Firehose uses the most recent + // version. This means that any updates to the table are automatically picked up. VersionId *string noSmithyDocumentSerde } -// The serializer that you want Kinesis Data Firehose to use to convert data to -// the target format before writing it to Amazon S3. Kinesis Data Firehose supports -// two types of serializers: the ORC SerDe (https://hive.apache.org/javadocs/r1.2.2/api/org/apache/hadoop/hive/ql/io/orc/OrcSerde.html) +// The serializer that you want Firehose to use to convert data to the target +// format before writing it to Amazon S3. Firehose supports two types of +// serializers: the ORC SerDe (https://hive.apache.org/javadocs/r1.2.2/api/org/apache/hadoop/hive/ql/io/orc/OrcSerde.html) // and the Parquet SerDe (https://hive.apache.org/javadocs/r1.2.2/api/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.html) // . type Serializer struct { @@ -2262,8 +2255,8 @@ type SnowflakeDestinationConfiguration struct { // Describes a data processing configuration. ProcessingConfiguration *ProcessingConfiguration - // The time period where Kinesis Data Firehose will retry sending data to the - // chosen HTTP endpoint. + // The time period where Firehose will retry sending data to the chosen HTTP + // endpoint. RetryOptions *SnowflakeRetryOptions // Choose an S3 backup mode @@ -2309,8 +2302,8 @@ type SnowflakeDestinationDescription struct { // Describes a data processing configuration. ProcessingConfiguration *ProcessingConfiguration - // The time period where Kinesis Data Firehose will retry sending data to the - // chosen HTTP endpoint. + // The time period where Firehose will retry sending data to the chosen HTTP + // endpoint. RetryOptions *SnowflakeRetryOptions // The Amazon Resource Name (ARN) of the Snowflake role @@ -2383,22 +2376,21 @@ type SnowflakeDestinationUpdate struct { // Describes a data processing configuration. ProcessingConfiguration *ProcessingConfiguration - // Specify how long Kinesis Data Firehose retries sending data to the New Relic - // HTTP endpoint. After sending data, Kinesis Data Firehose first waits for an - // acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment - // doesn’t arrive within the acknowledgment timeout period, Kinesis Data Firehose - // starts the retry duration counter. It keeps retrying until the retry duration - // expires. 
After that, Kinesis Data Firehose considers it a data delivery failure - // and backs up the data to your Amazon S3 bucket. Every time that Kinesis Data - // Firehose sends data to the HTTP endpoint (either the initial attempt or a - // retry), it restarts the acknowledgement timeout counter and waits for an + // Specify how long Firehose retries sending data to the New Relic HTTP endpoint. + // After sending data, Firehose first waits for an acknowledgment from the HTTP + // endpoint. If an error occurs or the acknowledgment doesn’t arrive within the + // acknowledgment timeout period, Firehose starts the retry duration counter. It + // keeps retrying until the retry duration expires. After that, Firehose considers + // it a data delivery failure and backs up the data to your Amazon S3 bucket. Every + // time that Firehose sends data to the HTTP endpoint (either the initial attempt + // or a retry), it restarts the acknowledgement timeout counter and waits for an // acknowledgement from the HTTP endpoint. Even if the retry duration expires, - // Kinesis Data Firehose still waits for the acknowledgment until it receives it or - // the acknowledgement timeout period is reached. If the acknowledgment times out, - // Kinesis Data Firehose determines whether there's time left in the retry counter. - // If there is time left, it retries again and repeats the logic until it receives - // an acknowledgment or determines that the retry time has expired. If you don't - // want Kinesis Data Firehose to retry sending data, set this value to 0. + // Firehose still waits for the acknowledgment until it receives it or the + // acknowledgement timeout period is reached. If the acknowledgment times out, + // Firehose determines whether there's time left in the retry counter. If there is + // time left, it retries again and repeats the logic until it receives an + // acknowledgment or determines that the retry time has expired. If you don't want + // Firehose to retry sending data, set this value to 0. RetryOptions *SnowflakeRetryOptions // The Amazon Resource Name (ARN) of the Snowflake role @@ -2428,26 +2420,25 @@ type SnowflakeDestinationUpdate struct { noSmithyDocumentSerde } -// Specify how long Kinesis Data Firehose retries sending data to the New Relic -// HTTP endpoint. After sending data, Kinesis Data Firehose first waits for an -// acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment -// doesn’t arrive within the acknowledgment timeout period, Kinesis Data Firehose -// starts the retry duration counter. It keeps retrying until the retry duration -// expires. After that, Kinesis Data Firehose considers it a data delivery failure -// and backs up the data to your Amazon S3 bucket. Every time that Kinesis Data -// Firehose sends data to the HTTP endpoint (either the initial attempt or a -// retry), it restarts the acknowledgement timeout counter and waits for an +// Specify how long Firehose retries sending data to the New Relic HTTP endpoint. +// After sending data, Firehose first waits for an acknowledgment from the HTTP +// endpoint. If an error occurs or the acknowledgment doesn’t arrive within the +// acknowledgment timeout period, Firehose starts the retry duration counter. It +// keeps retrying until the retry duration expires. After that, Firehose considers +// it a data delivery failure and backs up the data to your Amazon S3 bucket. 
Every +// time that Firehose sends data to the HTTP endpoint (either the initial attempt +// or a retry), it restarts the acknowledgement timeout counter and waits for an // acknowledgement from the HTTP endpoint. Even if the retry duration expires, -// Kinesis Data Firehose still waits for the acknowledgment until it receives it or -// the acknowledgement timeout period is reached. If the acknowledgment times out, -// Kinesis Data Firehose determines whether there's time left in the retry counter. -// If there is time left, it retries again and repeats the logic until it receives -// an acknowledgment or determines that the retry time has expired. If you don't -// want Kinesis Data Firehose to retry sending data, set this value to 0. +// Firehose still waits for the acknowledgment until it receives it or the +// acknowledgement timeout period is reached. If the acknowledgment times out, +// Firehose determines whether there's time left in the retry counter. If there is +// time left, it retries again and repeats the logic until it receives an +// acknowledgment or determines that the retry time has expired. If you don't want +// Firehose to retry sending data, set this value to 0. type SnowflakeRetryOptions struct { - // the time period where Kinesis Data Firehose will retry sending data to the - // chosen HTTP endpoint. + // the time period where Firehose will retry sending data to the chosen HTTP + // endpoint. DurationInSeconds *int32 noSmithyDocumentSerde @@ -2479,8 +2470,8 @@ type SnowflakeVpcConfiguration struct { noSmithyDocumentSerde } -// Details about a Kinesis data stream used as the source for a Kinesis Data -// Firehose delivery stream. +// Details about a Kinesis data stream used as the source for a Firehose delivery +// stream. type SourceDescription struct { // The KinesisStreamSourceDescription value for the source Kinesis data stream. @@ -2511,8 +2502,7 @@ type SplunkBufferingHints struct { // Describes the configuration of a destination in Splunk. type SplunkDestinationConfiguration struct { - // The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends - // your data. + // The HTTP Event Collector (HEC) endpoint to which Firehose sends your data. // // This member is required. HECEndpoint *string @@ -2540,26 +2530,25 @@ type SplunkDestinationConfiguration struct { // The Amazon CloudWatch logging options for your delivery stream. CloudWatchLoggingOptions *CloudWatchLoggingOptions - // The amount of time that Kinesis Data Firehose waits to receive an - // acknowledgment from Splunk after it sends it data. At the end of the timeout - // period, Kinesis Data Firehose either tries to send the data again or considers - // it an error, based on your retry settings. + // The amount of time that Firehose waits to receive an acknowledgment from Splunk + // after it sends it data. At the end of the timeout period, Firehose either tries + // to send the data again or considers it an error, based on your retry settings. HECAcknowledgmentTimeoutInSeconds *int32 // The data processing configuration. ProcessingConfiguration *ProcessingConfiguration - // The retry behavior in case Kinesis Data Firehose is unable to deliver data to - // Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk. + // The retry behavior in case Firehose is unable to deliver data to Splunk, or if + // it doesn't receive an acknowledgment of receipt from Splunk. RetryOptions *SplunkRetryOptions // Defines how documents should be delivered to Amazon S3. 
When set to - // FailedEventsOnly , Kinesis Data Firehose writes any data that could not be - // indexed to the configured Amazon S3 destination. When set to AllEvents , Kinesis - // Data Firehose delivers all incoming records to Amazon S3, and also writes failed - // documents to Amazon S3. The default value is FailedEventsOnly . You can update - // this backup mode from FailedEventsOnly to AllEvents . You can't update it from - // AllEvents to FailedEventsOnly . + // FailedEventsOnly , Firehose writes any data that could not be indexed to the + // configured Amazon S3 destination. When set to AllEvents , Firehose delivers all + // incoming records to Amazon S3, and also writes failed documents to Amazon S3. + // The default value is FailedEventsOnly . You can update this backup mode from + // FailedEventsOnly to AllEvents . You can't update it from AllEvents to + // FailedEventsOnly . S3BackupMode SplunkS3BackupMode noSmithyDocumentSerde @@ -2575,14 +2564,12 @@ type SplunkDestinationDescription struct { // The Amazon CloudWatch logging options for your delivery stream. CloudWatchLoggingOptions *CloudWatchLoggingOptions - // The amount of time that Kinesis Data Firehose waits to receive an - // acknowledgment from Splunk after it sends it data. At the end of the timeout - // period, Kinesis Data Firehose either tries to send the data again or considers - // it an error, based on your retry settings. + // The amount of time that Firehose waits to receive an acknowledgment from Splunk + // after it sends it data. At the end of the timeout period, Firehose either tries + // to send the data again or considers it an error, based on your retry settings. HECAcknowledgmentTimeoutInSeconds *int32 - // The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends - // your data. + // The HTTP Event Collector (HEC) endpoint to which Firehose sends your data. HECEndpoint *string // This type can be either "Raw" or "Event." @@ -2594,15 +2581,15 @@ type SplunkDestinationDescription struct { // The data processing configuration. ProcessingConfiguration *ProcessingConfiguration - // The retry behavior in case Kinesis Data Firehose is unable to deliver data to - // Splunk or if it doesn't receive an acknowledgment of receipt from Splunk. + // The retry behavior in case Firehose is unable to deliver data to Splunk or if + // it doesn't receive an acknowledgment of receipt from Splunk. RetryOptions *SplunkRetryOptions // Defines how documents should be delivered to Amazon S3. When set to - // FailedDocumentsOnly , Kinesis Data Firehose writes any data that could not be - // indexed to the configured Amazon S3 destination. When set to AllDocuments , - // Kinesis Data Firehose delivers all incoming records to Amazon S3, and also - // writes failed documents to Amazon S3. Default value is FailedDocumentsOnly . + // FailedDocumentsOnly , Firehose writes any data that could not be indexed to the + // configured Amazon S3 destination. When set to AllDocuments , Firehose delivers + // all incoming records to Amazon S3, and also writes failed documents to Amazon + // S3. Default value is FailedDocumentsOnly . S3BackupMode SplunkS3BackupMode // The Amazon S3 destination.> @@ -2621,14 +2608,12 @@ type SplunkDestinationUpdate struct { // The Amazon CloudWatch logging options for your delivery stream. CloudWatchLoggingOptions *CloudWatchLoggingOptions - // The amount of time that Kinesis Data Firehose waits to receive an - // acknowledgment from Splunk after it sends data. 
At the end of the timeout - // period, Kinesis Data Firehose either tries to send the data again or considers - // it an error, based on your retry settings. + // The amount of time that Firehose waits to receive an acknowledgment from Splunk + // after it sends data. At the end of the timeout period, Firehose either tries to + // send the data again or considers it an error, based on your retry settings. HECAcknowledgmentTimeoutInSeconds *int32 - // The HTTP Event Collector (HEC) endpoint to which Kinesis Data Firehose sends - // your data. + // The HTTP Event Collector (HEC) endpoint to which Firehose sends your data. HECEndpoint *string // This type can be either "Raw" or "Event." @@ -2641,17 +2626,17 @@ type SplunkDestinationUpdate struct { // The data processing configuration. ProcessingConfiguration *ProcessingConfiguration - // The retry behavior in case Kinesis Data Firehose is unable to deliver data to - // Splunk or if it doesn't receive an acknowledgment of receipt from Splunk. + // The retry behavior in case Firehose is unable to deliver data to Splunk or if + // it doesn't receive an acknowledgment of receipt from Splunk. RetryOptions *SplunkRetryOptions - // Specifies how you want Kinesis Data Firehose to back up documents to Amazon S3. - // When set to FailedDocumentsOnly , Kinesis Data Firehose writes any data that - // could not be indexed to the configured Amazon S3 destination. When set to - // AllEvents , Kinesis Data Firehose delivers all incoming records to Amazon S3, - // and also writes failed documents to Amazon S3. The default value is - // FailedEventsOnly . You can update this backup mode from FailedEventsOnly to - // AllEvents . You can't update it from AllEvents to FailedEventsOnly . + // Specifies how you want Firehose to back up documents to Amazon S3. When set to + // FailedDocumentsOnly , Firehose writes any data that could not be indexed to the + // configured Amazon S3 destination. When set to AllEvents , Firehose delivers all + // incoming records to Amazon S3, and also writes failed documents to Amazon S3. + // The default value is FailedEventsOnly . You can update this backup mode from + // FailedEventsOnly to AllEvents . You can't update it from AllEvents to + // FailedEventsOnly . S3BackupMode SplunkS3BackupMode // Your update to the configuration of the backup Amazon S3 location. @@ -2660,14 +2645,14 @@ type SplunkDestinationUpdate struct { noSmithyDocumentSerde } -// Configures retry behavior in case Kinesis Data Firehose is unable to deliver -// documents to Splunk, or if it doesn't receive an acknowledgment from Splunk. +// Configures retry behavior in case Firehose is unable to deliver documents to +// Splunk, or if it doesn't receive an acknowledgment from Splunk. type SplunkRetryOptions struct { - // The total amount of time that Kinesis Data Firehose spends on retries. This - // duration starts after the initial attempt to send data to Splunk fails. It - // doesn't include the periods during which Kinesis Data Firehose waits for - // acknowledgment from Splunk after each attempt. + // The total amount of time that Firehose spends on retries. This duration starts + // after the initial attempt to send data to Splunk fails. It doesn't include the + // periods during which Firehose waits for acknowledgment from Splunk after each + // attempt. 
DurationInSeconds *int32 noSmithyDocumentSerde @@ -2696,10 +2681,10 @@ type Tag struct { type VpcConfiguration struct { // The ARN of the IAM role that you want the delivery stream to use to create - // endpoints in the destination VPC. You can use your existing Kinesis Data - // Firehose delivery role or you can specify a new role. In either case, make sure - // that the role trusts the Kinesis Data Firehose service principal and that it - // grants the following permissions: + // endpoints in the destination VPC. You can use your existing Firehose delivery + // role or you can specify a new role. In either case, make sure that the role + // trusts the Firehose service principal and that it grants the following + // permissions: // - ec2:DescribeVpcs // - ec2:DescribeVpcAttribute // - ec2:DescribeSubnets @@ -2708,18 +2693,20 @@ type VpcConfiguration struct { // - ec2:CreateNetworkInterface // - ec2:CreateNetworkInterfacePermission // - ec2:DeleteNetworkInterface - // If you revoke these permissions after you create the delivery stream, Kinesis - // Data Firehose can't scale out by creating more ENIs when necessary. You might - // therefore see a degradation in performance. + // When you specify subnets for delivering data to the destination in a private + // VPC, make sure you have enough number of free IP addresses in chosen subnets. If + // there is no available free IP address in a specified subnet, Firehose cannot + // create or add ENIs for the data delivery in the private VPC, and the delivery + // will be degraded or fail. // // This member is required. RoleARN *string - // The IDs of the security groups that you want Kinesis Data Firehose to use when - // it creates ENIs in the VPC of the Amazon ES destination. You can use the same - // security group that the Amazon ES domain uses or different ones. If you specify - // different security groups here, ensure that they allow outbound HTTPS traffic to - // the Amazon ES domain's security group. Also ensure that the Amazon ES domain's + // The IDs of the security groups that you want Firehose to use when it creates + // ENIs in the VPC of the Amazon ES destination. You can use the same security + // group that the Amazon ES domain uses or different ones. If you specify different + // security groups here, ensure that they allow outbound HTTPS traffic to the + // Amazon ES domain's security group. Also ensure that the Amazon ES domain's // security group allows HTTPS traffic from the security groups specified here. If // you use the same security group for both your delivery stream and the Amazon ES // domain, make sure the security group inbound rule allows HTTPS traffic. For more @@ -2729,18 +2716,18 @@ type VpcConfiguration struct { // This member is required. SecurityGroupIds []string - // The IDs of the subnets that you want Kinesis Data Firehose to use to create - // ENIs in the VPC of the Amazon ES destination. Make sure that the routing tables - // and inbound and outbound rules allow traffic to flow from the subnets whose IDs - // are specified here to the subnets that have the destination Amazon ES endpoints. - // Kinesis Data Firehose creates at least one ENI in each of the subnets that are - // specified here. Do not delete or modify these ENIs. The number of ENIs that - // Kinesis Data Firehose creates in the subnets specified here scales up and down - // automatically based on throughput. To enable Kinesis Data Firehose to scale up - // the number of ENIs to match throughput, ensure that you have sufficient quota. 
- // To help you calculate the quota you need, assume that Kinesis Data Firehose can - // create up to three ENIs for this delivery stream for each of the subnets - // specified here. For more information about ENI quota, see Network Interfaces (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-enis) + // The IDs of the subnets that you want Firehose to use to create ENIs in the VPC + // of the Amazon ES destination. Make sure that the routing tables and inbound and + // outbound rules allow traffic to flow from the subnets whose IDs are specified + // here to the subnets that have the destination Amazon ES endpoints. Firehose + // creates at least one ENI in each of the subnets that are specified here. Do not + // delete or modify these ENIs. The number of ENIs that Firehose creates in the + // subnets specified here scales up and down automatically based on throughput. To + // enable Firehose to scale up the number of ENIs to match throughput, ensure that + // you have sufficient quota. To help you calculate the quota you need, assume that + // Firehose can create up to three ENIs for this delivery stream for each of the + // subnets specified here. For more information about ENI quota, see Network + // Interfaces (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-enis) // in the Amazon VPC Quotas topic. // // This member is required. @@ -2753,10 +2740,9 @@ type VpcConfiguration struct { type VpcConfigurationDescription struct { // The ARN of the IAM role that the delivery stream uses to create endpoints in - // the destination VPC. You can use your existing Kinesis Data Firehose delivery - // role or you can specify a new role. In either case, make sure that the role - // trusts the Kinesis Data Firehose service principal and that it grants the - // following permissions: + // the destination VPC. You can use your existing Firehose delivery role or you can + // specify a new role. In either case, make sure that the role trusts the Firehose + // service principal and that it grants the following permissions: // - ec2:DescribeVpcs // - ec2:DescribeVpcAttribute // - ec2:DescribeSubnets @@ -2765,39 +2751,39 @@ type VpcConfigurationDescription struct { // - ec2:CreateNetworkInterface // - ec2:CreateNetworkInterfacePermission // - ec2:DeleteNetworkInterface - // If you revoke these permissions after you create the delivery stream, Kinesis - // Data Firehose can't scale out by creating more ENIs when necessary. You might - // therefore see a degradation in performance. + // If you revoke these permissions after you create the delivery stream, Firehose + // can't scale out by creating more ENIs when necessary. You might therefore see a + // degradation in performance. // // This member is required. RoleARN *string - // The IDs of the security groups that Kinesis Data Firehose uses when it creates - // ENIs in the VPC of the Amazon ES destination. You can use the same security - // group that the Amazon ES domain uses or different ones. If you specify different - // security groups, ensure that they allow outbound HTTPS traffic to the Amazon ES - // domain's security group. Also ensure that the Amazon ES domain's security group - // allows HTTPS traffic from the security groups specified here. If you use the - // same security group for both your delivery stream and the Amazon ES domain, make - // sure the security group inbound rule allows HTTPS traffic. 
For more information - // about security group rules, see Security group rules (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#SecurityGroupRules) + // The IDs of the security groups that Firehose uses when it creates ENIs in the + // VPC of the Amazon ES destination. You can use the same security group that the + // Amazon ES domain uses or different ones. If you specify different security + // groups, ensure that they allow outbound HTTPS traffic to the Amazon ES domain's + // security group. Also ensure that the Amazon ES domain's security group allows + // HTTPS traffic from the security groups specified here. If you use the same + // security group for both your delivery stream and the Amazon ES domain, make sure + // the security group inbound rule allows HTTPS traffic. For more information about + // security group rules, see Security group rules (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#SecurityGroupRules) // in the Amazon VPC documentation. // // This member is required. SecurityGroupIds []string - // The IDs of the subnets that Kinesis Data Firehose uses to create ENIs in the - // VPC of the Amazon ES destination. Make sure that the routing tables and inbound - // and outbound rules allow traffic to flow from the subnets whose IDs are - // specified here to the subnets that have the destination Amazon ES endpoints. - // Kinesis Data Firehose creates at least one ENI in each of the subnets that are - // specified here. Do not delete or modify these ENIs. The number of ENIs that - // Kinesis Data Firehose creates in the subnets specified here scales up and down - // automatically based on throughput. To enable Kinesis Data Firehose to scale up - // the number of ENIs to match throughput, ensure that you have sufficient quota. - // To help you calculate the quota you need, assume that Kinesis Data Firehose can - // create up to three ENIs for this delivery stream for each of the subnets - // specified here. For more information about ENI quota, see Network Interfaces (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-enis) + // The IDs of the subnets that Firehose uses to create ENIs in the VPC of the + // Amazon ES destination. Make sure that the routing tables and inbound and + // outbound rules allow traffic to flow from the subnets whose IDs are specified + // here to the subnets that have the destination Amazon ES endpoints. Firehose + // creates at least one ENI in each of the subnets that are specified here. Do not + // delete or modify these ENIs. The number of ENIs that Firehose creates in the + // subnets specified here scales up and down automatically based on throughput. To + // enable Firehose to scale up the number of ENIs to match throughput, ensure that + // you have sufficient quota. To help you calculate the quota you need, assume that + // Firehose can create up to three ENIs for this delivery stream for each of the + // subnets specified here. For more information about ENI quota, see Network + // Interfaces (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-enis) // in the Amazon VPC Quotas topic. // // This member is required. 
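Before moving on to the Lambda changes: the Firehose hunks above introduce FileExtension and CustomTimeZone on the extended S3 destination types. The sketch below, which is not part of this patch, shows one way those fields might be exercised through UpdateDestination; the stream name, version ID, destination ID, extension, and time zone are placeholder values, and error handling is reduced to returning the error.

package firehoseexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/firehose"
	"github.com/aws/aws-sdk-go-v2/service/firehose/types"
)

// updateFileExtension sketches applying the new ExtendedS3DestinationUpdate
// fields to an existing delivery stream.
func updateFileExtension(ctx context.Context) error {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return err
	}
	client := firehose.NewFromConfig(cfg)

	_, err = client.UpdateDestination(ctx, &firehose.UpdateDestinationInput{
		DeliveryStreamName:             aws.String("example-stream"),             // placeholder
		CurrentDeliveryStreamVersionId: aws.String("1"),                          // placeholder
		DestinationId:                  aws.String("destinationId-000000000001"), // placeholder
		ExtendedS3DestinationUpdate: &types.ExtendedS3DestinationUpdate{
			FileExtension:  aws.String(".parquet"), // overrides the default file extension
			CustomTimeZone: aws.String("UTC"),      // UTC is the default
		},
	})
	return err
}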
diff --git a/service/lambda/api_op_CreateEventSourceMapping.go b/service/lambda/api_op_CreateEventSourceMapping.go index d2b8153f1e9..4b9d3820a06 100644 --- a/service/lambda/api_op_CreateEventSourceMapping.go +++ b/service/lambda/api_op_CreateEventSourceMapping.go @@ -99,8 +99,9 @@ type CreateEventSourceMappingInput struct { // batch in two and retry. BisectBatchOnFunctionError *bool - // (Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard - // Amazon SNS topic destination for discarded records. + // (Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A + // configuration object that specifies the destination of an event after Lambda + // processes it. DestinationConfig *types.DestinationConfig // Specific configuration settings for a DocumentDB event source. @@ -114,7 +115,9 @@ type CreateEventSourceMappingInput struct { // - Amazon Kinesis – The ARN of the data stream or a stream consumer. // - Amazon DynamoDB Streams – The ARN of the stream. // - Amazon Simple Queue Service – The ARN of the queue. - // - Amazon Managed Streaming for Apache Kafka – The ARN of the cluster. + // - Amazon Managed Streaming for Apache Kafka – The ARN of the cluster or the + // ARN of the VPC connection (for cross-account event source mappings (https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#msk-multi-vpc) + // ). // - Amazon MQ – The ARN of the broker. // - Amazon DocumentDB – The ARN of the DocumentDB change stream. EventSourceArn *string diff --git a/service/lambda/api_op_CreateFunction.go b/service/lambda/api_op_CreateFunction.go index 1e78eee18a2..f0bf3a1438e 100644 --- a/service/lambda/api_op_CreateFunction.go +++ b/service/lambda/api_op_CreateFunction.go @@ -118,7 +118,9 @@ type CreateFunctionInput struct { Environment *types.Environment // The size of the function's /tmp directory in MB. The default value is 512, but - // can be any whole number between 512 and 10,240 MB. + // can be any whole number between 512 and 10,240 MB. For more information, see + // Configuring ephemeral storage (console) (https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-ephemeral-storage) + // . EphemeralStorage *types.EphemeralStorage // Connection settings for an Amazon EFS file system. @@ -225,8 +227,10 @@ type CreateFunctionOutput struct { // . Omitted from CloudTrail logs. Environment *types.EnvironmentResponse - // The size of the function’s /tmp directory in MB. The default value is 512, but - // it can be any whole number between 512 and 10,240 MB. + // The size of the function's /tmp directory in MB. The default value is 512, but + // can be any whole number between 512 and 10,240 MB. For more information, see + // Configuring ephemeral storage (console) (https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-ephemeral-storage) + // . EphemeralStorage *types.EphemeralStorage // Connection settings for an Amazon EFS file system (https://docs.aws.amazon.com/lambda/latest/dg/configuration-filesystem.html) diff --git a/service/lambda/api_op_GetFunctionConfiguration.go b/service/lambda/api_op_GetFunctionConfiguration.go index f65e80d36cd..0ec5d54b804 100644 --- a/service/lambda/api_op_GetFunctionConfiguration.go +++ b/service/lambda/api_op_GetFunctionConfiguration.go @@ -79,8 +79,10 @@ type GetFunctionConfigurationOutput struct { // . Omitted from CloudTrail logs. Environment *types.EnvironmentResponse - // The size of the function’s /tmp directory in MB. 
The default value is 512, but - // it can be any whole number between 512 and 10,240 MB. + // The size of the function's /tmp directory in MB. The default value is 512, but + // can be any whole number between 512 and 10,240 MB. For more information, see + // Configuring ephemeral storage (console) (https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-ephemeral-storage) + // . EphemeralStorage *types.EphemeralStorage // Connection settings for an Amazon EFS file system (https://docs.aws.amazon.com/lambda/latest/dg/configuration-filesystem.html) diff --git a/service/lambda/api_op_Invoke.go b/service/lambda/api_op_Invoke.go index 80cb7c21bb9..ae00a1263c9 100644 --- a/service/lambda/api_op_Invoke.go +++ b/service/lambda/api_op_Invoke.go @@ -77,7 +77,8 @@ type InvokeInput struct { FunctionName *string // Up to 3,583 bytes of base64-encoded data about the invoking client to pass to - // the function in the context object. + // the function in the context object. Lambda passes the ClientContext object to + // your function for synchronous invocations only. ClientContext *string // Choose from the following options. diff --git a/service/lambda/api_op_InvokeAsync.go b/service/lambda/api_op_InvokeAsync.go index a368a4272a1..0a84bd195c4 100644 --- a/service/lambda/api_op_InvokeAsync.go +++ b/service/lambda/api_op_InvokeAsync.go @@ -13,7 +13,9 @@ import ( ) // For asynchronous function invocation, use Invoke . Invokes a function -// asynchronously. +// asynchronously. If you do use the InvokeAsync action, note that it doesn't +// support the use of X-Ray active tracing. Trace ID is not propagated to the +// function, even if X-Ray active tracing is turned on. // // Deprecated: This operation has been deprecated. func (c *Client) InvokeAsync(ctx context.Context, params *InvokeAsyncInput, optFns ...func(*Options)) (*InvokeAsyncOutput, error) { diff --git a/service/lambda/api_op_ListEventSourceMappings.go b/service/lambda/api_op_ListEventSourceMappings.go index d694003f619..49e9cf33e07 100644 --- a/service/lambda/api_op_ListEventSourceMappings.go +++ b/service/lambda/api_op_ListEventSourceMappings.go @@ -35,7 +35,9 @@ type ListEventSourceMappingsInput struct { // - Amazon Kinesis – The ARN of the data stream or a stream consumer. // - Amazon DynamoDB Streams – The ARN of the stream. // - Amazon Simple Queue Service – The ARN of the queue. - // - Amazon Managed Streaming for Apache Kafka – The ARN of the cluster. + // - Amazon Managed Streaming for Apache Kafka – The ARN of the cluster or the + // ARN of the VPC connection (for cross-account event source mappings (https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#msk-multi-vpc) + // ). // - Amazon MQ – The ARN of the broker. // - Amazon DocumentDB – The ARN of the DocumentDB change stream. EventSourceArn *string diff --git a/service/lambda/api_op_ListLayerVersions.go b/service/lambda/api_op_ListLayerVersions.go index 850f3373b5e..04dd2c5a87f 100644 --- a/service/lambda/api_op_ListLayerVersions.go +++ b/service/lambda/api_op_ListLayerVersions.go @@ -43,7 +43,7 @@ type ListLayerVersionsInput struct { // . CompatibleArchitecture types.Architecture - // A runtime identifier. For example, go1.x . The following list includes + // A runtime identifier. For example, java21 . The following list includes // deprecated runtimes. For more information, see Runtime deprecation policy (https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html#runtime-support-policy) // . 
CompatibleRuntime types.Runtime diff --git a/service/lambda/api_op_ListLayers.go b/service/lambda/api_op_ListLayers.go index 9554babe71a..e3c233a84d5 100644 --- a/service/lambda/api_op_ListLayers.go +++ b/service/lambda/api_op_ListLayers.go @@ -40,7 +40,7 @@ type ListLayersInput struct { // . CompatibleArchitecture types.Architecture - // A runtime identifier. For example, go1.x . The following list includes + // A runtime identifier. For example, java21 . The following list includes // deprecated runtimes. For more information, see Runtime deprecation policy (https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html#runtime-support-policy) // . CompatibleRuntime types.Runtime diff --git a/service/lambda/api_op_PublishVersion.go b/service/lambda/api_op_PublishVersion.go index 1bfa05fa8ae..86613a5718d 100644 --- a/service/lambda/api_op_PublishVersion.go +++ b/service/lambda/api_op_PublishVersion.go @@ -89,8 +89,10 @@ type PublishVersionOutput struct { // . Omitted from CloudTrail logs. Environment *types.EnvironmentResponse - // The size of the function’s /tmp directory in MB. The default value is 512, but - // it can be any whole number between 512 and 10,240 MB. + // The size of the function's /tmp directory in MB. The default value is 512, but + // can be any whole number between 512 and 10,240 MB. For more information, see + // Configuring ephemeral storage (console) (https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-ephemeral-storage) + // . EphemeralStorage *types.EphemeralStorage // Connection settings for an Amazon EFS file system (https://docs.aws.amazon.com/lambda/latest/dg/configuration-filesystem.html) diff --git a/service/lambda/api_op_UpdateEventSourceMapping.go b/service/lambda/api_op_UpdateEventSourceMapping.go index 534c3847b87..9a9973c1e5b 100644 --- a/service/lambda/api_op_UpdateEventSourceMapping.go +++ b/service/lambda/api_op_UpdateEventSourceMapping.go @@ -89,8 +89,9 @@ type UpdateEventSourceMappingInput struct { // batch in two and retry. BisectBatchOnFunctionError *bool - // (Kinesis and DynamoDB Streams only) A standard Amazon SQS queue or standard - // Amazon SNS topic destination for discarded records. + // (Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A + // configuration object that specifies the destination of an event after Lambda + // processes it. DestinationConfig *types.DestinationConfig // Specific configuration settings for a DocumentDB event source. diff --git a/service/lambda/api_op_UpdateFunctionCode.go b/service/lambda/api_op_UpdateFunctionCode.go index 8151d823147..4da8bca1b19 100644 --- a/service/lambda/api_op_UpdateFunctionCode.go +++ b/service/lambda/api_op_UpdateFunctionCode.go @@ -120,8 +120,10 @@ type UpdateFunctionCodeOutput struct { // . Omitted from CloudTrail logs. Environment *types.EnvironmentResponse - // The size of the function’s /tmp directory in MB. The default value is 512, but - // it can be any whole number between 512 and 10,240 MB. + // The size of the function's /tmp directory in MB. The default value is 512, but + // can be any whole number between 512 and 10,240 MB. For more information, see + // Configuring ephemeral storage (console) (https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-ephemeral-storage) + // . 
EphemeralStorage *types.EphemeralStorage // Connection settings for an Amazon EFS file system (https://docs.aws.amazon.com/lambda/latest/dg/configuration-filesystem.html) diff --git a/service/lambda/api_op_UpdateFunctionConfiguration.go b/service/lambda/api_op_UpdateFunctionConfiguration.go index 5b3d62fa118..89ace52db24 100644 --- a/service/lambda/api_op_UpdateFunctionConfiguration.go +++ b/service/lambda/api_op_UpdateFunctionConfiguration.go @@ -65,7 +65,9 @@ type UpdateFunctionConfigurationInput struct { Environment *types.Environment // The size of the function's /tmp directory in MB. The default value is 512, but - // can be any whole number between 512 and 10,240 MB. + // can be any whole number between 512 and 10,240 MB. For more information, see + // Configuring ephemeral storage (console) (https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-ephemeral-storage) + // . EphemeralStorage *types.EphemeralStorage // Connection settings for an Amazon EFS file system. @@ -169,8 +171,10 @@ type UpdateFunctionConfigurationOutput struct { // . Omitted from CloudTrail logs. Environment *types.EnvironmentResponse - // The size of the function’s /tmp directory in MB. The default value is 512, but - // it can be any whole number between 512 and 10,240 MB. + // The size of the function's /tmp directory in MB. The default value is 512, but + // can be any whole number between 512 and 10,240 MB. For more information, see + // Configuring ephemeral storage (console) (https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-ephemeral-storage) + // . EphemeralStorage *types.EphemeralStorage // Connection settings for an Amazon EFS file system (https://docs.aws.amazon.com/lambda/latest/dg/configuration-filesystem.html) diff --git a/service/lambda/types/types.go b/service/lambda/types/types.go index 3eddd9eeb7f..bfb88374c92 100644 --- a/service/lambda/types/types.go +++ b/service/lambda/types/types.go @@ -287,7 +287,9 @@ type EnvironmentResponse struct { } // The size of the function's /tmp directory in MB. The default value is 512, but -// it can be any whole number between 512 and 10,240 MB. +// can be any whole number between 512 and 10,240 MB. For more information, see +// Configuring ephemeral storage (console) (https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-ephemeral-storage) +// . type EphemeralStorage struct { // The size of the function's /tmp directory. @@ -537,8 +539,10 @@ type FunctionConfiguration struct { // . Omitted from CloudTrail logs. Environment *EnvironmentResponse - // The size of the function’s /tmp directory in MB. The default value is 512, but - // it can be any whole number between 512 and 10,240 MB. + // The size of the function's /tmp directory in MB. The default value is 512, but + // can be any whole number between 512 and 10,240 MB. For more information, see + // Configuring ephemeral storage (console) (https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-ephemeral-storage) + // . EphemeralStorage *EphemeralStorage // Connection settings for an Amazon EFS file system (https://docs.aws.amazon.com/lambda/latest/dg/configuration-filesystem.html) @@ -938,8 +942,8 @@ type LayerVersionsListItem struct { type LoggingConfig struct { // Set this property to filter the application logs for your function that Lambda - // sends to CloudWatch. Lambda only sends application logs at the selected level - // and lower. 
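The EphemeralStorage doc text repeated in the hunks above maps to a single input field on UpdateFunctionConfiguration. A minimal sketch, assuming a client built as in the first sketch; the function name is a placeholder.

    package main

    import (
        "context"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/lambda"
        "github.com/aws/aws-sdk-go-v2/service/lambda/types"
    )

    // raiseTmpStorage sets /tmp to 2,048 MB; any whole number of MB from 512
    // through 10,240 is accepted, per the doc text above.
    func raiseTmpStorage(ctx context.Context, client *lambda.Client) error {
        _, err := client.UpdateFunctionConfiguration(ctx, &lambda.UpdateFunctionConfigurationInput{
            FunctionName:     aws.String("my-function"), // placeholder
            EphemeralStorage: &types.EphemeralStorage{Size: aws.Int32(2048)},
        })
        return err
    }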
+ // sends to CloudWatch. Lambda only sends application logs at the selected level of + // detail and lower, where TRACE is the highest level and FATAL is the lowest. ApplicationLogLevel ApplicationLogLevel // The format in which Lambda sends your function's application and system logs to @@ -953,7 +957,8 @@ type LoggingConfig struct { LogGroup *string // Set this property to filter the system logs for your function that Lambda sends - // to CloudWatch. Lambda only sends system logs at the selected level and lower. + // to CloudWatch. Lambda only sends system logs at the selected level of detail and + // lower, where DEBUG is the highest level and WARN is the lowest. SystemLogLevel SystemLogLevel noSmithyDocumentSerde @@ -962,7 +967,15 @@ type LoggingConfig struct { // A destination for events that failed processing. type OnFailure struct { - // The Amazon Resource Name (ARN) of the destination resource. + // The Amazon Resource Name (ARN) of the destination resource. To retain records + // of asynchronous invocations (https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations) + // , you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or + // Amazon EventBridge event bus as the destination. To retain records of failed + // invocations from Kinesis and DynamoDB event sources (https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations) + // , you can configure an Amazon SNS topic or Amazon SQS queue as the destination. + // To retain records of failed invocations from self-managed Kafka (https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) + // or Amazon MSK (https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination) + // , you can configure an Amazon SNS topic or Amazon SQS queue as the destination. Destination *string noSmithyDocumentSerde diff --git a/service/osis/internal/endpoints/endpoints.go b/service/osis/internal/endpoints/endpoints.go index 72007c9b593..001510d039b 100644 --- a/service/osis/internal/endpoints/endpoints.go +++ b/service/osis/internal/endpoints/endpoints.go @@ -160,12 +160,18 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-west-1", }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-west-2", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "us-east-1", }: endpoints.Endpoint{}, diff --git a/service/rds/api_op_CreateDBCluster.go b/service/rds/api_op_CreateDBCluster.go index 424ec9883c1..fc0876ceac0 100644 --- a/service/rds/api_op_CreateDBCluster.go +++ b/service/rds/api_op_CreateDBCluster.go @@ -47,7 +47,8 @@ type CreateDBClusterInput struct { // The identifier for this DB cluster. This parameter is stored as a lowercase // string. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters // Constraints: - // - Must contain from 1 to 63 letters, numbers, or hyphens. + // - Must contain from 1 to 63 (for Aurora DB clusters) or 1 to 52 (for Multi-AZ + // DB clusters) letters, numbers, or hyphens. // - First character must be a letter. // - Can't end with a hyphen or contain two consecutive hyphens. 
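The LoggingConfig wording above (application logs filtered from TRACE down to FATAL, system logs from DEBUG down to WARN) is easiest to read as a configuration call. A minimal sketch, assuming a client built as in the first sketch; the function name is a placeholder, and the enum constants (types.LogFormatJson, types.ApplicationLogLevelDebug, types.SystemLogLevelWarn) follow the generator's usual naming rather than being quoted from this patch.

    package main

    import (
        "context"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/lambda"
        "github.com/aws/aws-sdk-go-v2/service/lambda/types"
    )

    // setLogLevels switches the function to JSON-format logs and filters what
    // Lambda sends to CloudWatch.
    func setLogLevels(ctx context.Context, client *lambda.Client) error {
        _, err := client.UpdateFunctionConfiguration(ctx, &lambda.UpdateFunctionConfigurationInput{
            FunctionName: aws.String("my-function"), // placeholder
            LoggingConfig: &types.LoggingConfig{
                LogFormat: types.LogFormatJson,
                // DEBUG keeps DEBUG and every less detailed level (INFO through
                // FATAL); only TRACE entries are filtered out.
                ApplicationLogLevel: types.ApplicationLogLevelDebug,
                // WARN is the least detailed system-log level, so only warnings
                // are delivered.
                SystemLogLevel: types.SystemLogLevelWarn,
            },
        })
        return err
    }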
// Example: my-cluster1 diff --git a/service/rds/api_op_CreateDBClusterParameterGroup.go b/service/rds/api_op_CreateDBClusterParameterGroup.go index 29cb6283895..30272efadd4 100644 --- a/service/rds/api_op_CreateDBClusterParameterGroup.go +++ b/service/rds/api_op_CreateDBClusterParameterGroup.go @@ -68,7 +68,7 @@ type CreateDBClusterParameterGroupInput struct { // applied only to a DB cluster running a database engine and engine version // compatible with that DB cluster parameter group family. Aurora MySQL Example: // aurora-mysql5.7 , aurora-mysql8.0 Aurora PostgreSQL Example: aurora-postgresql14 - // RDS for MySQL Example: mysql8.0 RDS for PostgreSQL Example: postgres12 To list + // RDS for MySQL Example: mysql8.0 RDS for PostgreSQL Example: postgres13 To list // all of the available parameter group families for a DB engine, use the following // command: aws rds describe-db-engine-versions --query // "DBEngineVersions[].DBParameterGroupFamily" --engine For example, to list all diff --git a/service/rds/types/types.go b/service/rds/types/types.go index 6aa1c3816cf..f90f6b2e796 100644 --- a/service/rds/types/types.go +++ b/service/rds/types/types.go @@ -2943,6 +2943,7 @@ type FailoverState struct { // - DescribeDBClusters // - DescribeDBInstances // - DescribeDBRecommendations +// - DescribeDBShardGroups // - DescribePendingMaintenanceActions type Filter struct { diff --git a/service/redshiftserverless/internal/endpoints/endpoints.go b/service/redshiftserverless/internal/endpoints/endpoints.go index fb6c3bd6fcc..c8d7c36b9f2 100644 --- a/service/redshiftserverless/internal/endpoints/endpoints.go +++ b/service/redshiftserverless/internal/endpoints/endpoints.go @@ -157,6 +157,12 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com", + }, endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{}, @@ -172,18 +178,87 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "eu-west-3", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "fips-ca-central-1", + }: endpoints.Endpoint{ + Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-east-1", + }: endpoints.Endpoint{ + Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-east-2", + }: endpoints.Endpoint{ + Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-west-1", + }: endpoints.Endpoint{ + Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-west-2", + }: endpoints.Endpoint{ + Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, endpoints.EndpointKey{ Region: "us-east-1", }: endpoints.Endpoint{}, + 
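The Redshift Serverless endpoint metadata added above registers FIPS variants for ca-central-1 and the US commercial Regions. A minimal sketch of opting a client into those endpoints through the shared config helper config.WithUseFIPSEndpoint; the Region choice and the ListNamespaces call are only placeholders to show endpoint resolution in use.

    package main

    import (
        "context"
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/redshiftserverless"
    )

    // listNamespacesOverFIPS resolves calls to the FIPS hostname
    // (redshift-serverless-fips.ca-central-1.amazonaws.com) now that the
    // variant is registered for this Region.
    func listNamespacesOverFIPS(ctx context.Context) error {
        cfg, err := config.LoadDefaultConfig(ctx,
            config.WithRegion("ca-central-1"),
            config.WithUseFIPSEndpoint(aws.FIPSEndpointStateEnabled),
        )
        if err != nil {
            return err
        }
        client := redshiftserverless.NewFromConfig(cfg)
        out, err := client.ListNamespaces(ctx, &redshiftserverless.ListNamespacesInput{})
        if err != nil {
            return err
        }
        for _, ns := range out.Namespaces {
            fmt.Println(aws.ToString(ns.NamespaceName))
        }
        return nil
    }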
endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com", + }, endpoints.EndpointKey{ Region: "us-east-2", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com", + }, endpoints.EndpointKey{ Region: "us-west-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com", + }, endpoints.EndpointKey{ Region: "us-west-2", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com", + }, }, }, { diff --git a/service/sns/api_op_CreatePlatformApplication.go b/service/sns/api_op_CreatePlatformApplication.go index 9fc8e4b6172..00893cfec9d 100644 --- a/service/sns/api_op_CreatePlatformApplication.go +++ b/service/sns/api_op_CreatePlatformApplication.go @@ -25,8 +25,14 @@ import ( // is SSL certificate and PlatformCredential is private key . // - For APNS and APNS_SANDBOX using token credentials, PlatformPrincipal is // signing key ID and PlatformCredential is signing key . -// - For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal and the -// PlatformCredential is API key . +// - For GCM (Firebase Cloud Messaging) using key credentials, there is no +// PlatformPrincipal . The PlatformCredential is API key . +// - For GCM (Firebase Cloud Messaging) using token credentials, there is no +// PlatformPrincipal . The PlatformCredential is a JSON formatted private key +// file. When using the Amazon Web Services CLI, the file must be in string format +// and special characters must be ignored. To format the file correctly, Amazon SNS +// recommends using the following command: SERVICE_JSON=`jq @json <<< cat +// service.json` . // - For MPNS , PlatformPrincipal is TLS certificate and PlatformCredential is // private key . // - For WNS , PlatformPrincipal is Package Security Identifier and diff --git a/service/sns/api_op_GetEndpointAttributes.go b/service/sns/api_op_GetEndpointAttributes.go index d0a156f3901..776e29a52bc 100644 --- a/service/sns/api_op_GetEndpointAttributes.go +++ b/service/sns/api_op_GetEndpointAttributes.go @@ -41,7 +41,7 @@ type GetEndpointAttributesInput struct { noSmithyDocumentSerde } -// Response from GetEndpointAttributes of the EndpointArn. +// Response from GetEndpointAttributes of the EndpointArn . type GetEndpointAttributesOutput struct { // Attributes include the following: diff --git a/service/sns/api_op_GetPlatformApplicationAttributes.go b/service/sns/api_op_GetPlatformApplicationAttributes.go index accb8cb2995..5b8d3353c92 100644 --- a/service/sns/api_op_GetPlatformApplicationAttributes.go +++ b/service/sns/api_op_GetPlatformApplicationAttributes.go @@ -51,6 +51,10 @@ type GetPlatformApplicationAttributesOutput struct { // token-based authentication. // - ApplePlatformBundleID – The app identifier used to configure token-based // authentication. + // - AuthenticationMethod – Returns the credential type used when sending push + // notifications from application to APNS/APNS_Sandbox, or application to GCM. + // - APNS – Returns the token or certificate. + // - GCM – Returns the token or key. // - EventEndpointCreated – Topic ARN to which EndpointCreated event // notifications should be sent. 
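The CreatePlatformApplication hunk above adds the GCM token-credential case, where the JSON service-account key file itself is the PlatformCredential. A minimal sketch with the Go SDK; the application name and key path are placeholders, and the jq step quoted in the doc text is a CLI quoting aid that should not be needed when passing the file contents programmatically.

    package main

    import (
        "context"
        "os"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/sns"
    )

    // createFirebaseApp registers a platform application using token
    // (service-account) credentials: no PlatformPrincipal, and the JSON private
    // key file is passed whole as PlatformCredential.
    func createFirebaseApp(ctx context.Context, client *sns.Client) (string, error) {
        key, err := os.ReadFile("service.json") // placeholder path
        if err != nil {
            return "", err
        }
        out, err := client.CreatePlatformApplication(ctx, &sns.CreatePlatformApplicationInput{
            Name:     aws.String("my-firebase-app"), // placeholder
            Platform: aws.String("GCM"),
            Attributes: map[string]string{
                "PlatformCredential": string(key),
            },
        })
        if err != nil {
            return "", err
        }
        return aws.ToString(out.PlatformApplicationArn), nil
    }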
// - EventEndpointDeleted – Topic ARN to which EndpointDeleted event diff --git a/service/sns/api_op_SetPlatformApplicationAttributes.go b/service/sns/api_op_SetPlatformApplicationAttributes.go index 2ecdb50e1ac..507c0aa1b14 100644 --- a/service/sns/api_op_SetPlatformApplicationAttributes.go +++ b/service/sns/api_op_SetPlatformApplicationAttributes.go @@ -43,7 +43,14 @@ type SetPlatformApplicationAttributesInput struct { // private key. // - For Apple Services using token credentials, PlatformCredential is signing // key. - // - For GCM (Firebase Cloud Messaging), PlatformCredential is API key. + // - For GCM (Firebase Cloud Messaging) using key credentials, there is no + // PlatformPrincipal . The PlatformCredential is API key . + // - For GCM (Firebase Cloud Messaging) using token credentials, there is no + // PlatformPrincipal . The PlatformCredential is a JSON formatted private key + // file. When using the Amazon Web Services CLI, the file must be in string format + // and special characters must be ignored. To format the file correctly, Amazon SNS + // recommends using the following command: SERVICE_JSON=`jq @json <<< cat + // service.json` . // // - PlatformPrincipal – The principal received from the notification service. // - For ADM, PlatformPrincipal is client id.
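The SetPlatformApplicationAttributes hunk above mirrors the same GCM credential cases, and the GetPlatformApplicationAttributes hunk adds the AuthenticationMethod attribute. A minimal sketch that rotates a service-account key and then reads back the credential type; the key path is a placeholder, and the application ARN is supplied by the caller.

    package main

    import (
        "context"
        "fmt"
        "os"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/sns"
    )

    // rotateFirebaseCredential swaps in a new service-account key, then checks
    // the AuthenticationMethod attribute to confirm the credential type in use.
    func rotateFirebaseCredential(ctx context.Context, client *sns.Client, appArn string) error {
        key, err := os.ReadFile("service.json") // placeholder path
        if err != nil {
            return err
        }
        _, err = client.SetPlatformApplicationAttributes(ctx, &sns.SetPlatformApplicationAttributesInput{
            PlatformApplicationArn: aws.String(appArn),
            Attributes:             map[string]string{"PlatformCredential": string(key)},
        })
        if err != nil {
            return err
        }
        got, err := client.GetPlatformApplicationAttributes(ctx, &sns.GetPlatformApplicationAttributesInput{
            PlatformApplicationArn: aws.String(appArn),
        })
        if err != nil {
            return err
        }
        fmt.Println("AuthenticationMethod:", got.Attributes["AuthenticationMethod"])
        return nil
    }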