diff --git a/.changelog/1d9ad6a14d4d4f9ba56a0995cb57e2c6.json b/.changelog/1d9ad6a14d4d4f9ba56a0995cb57e2c6.json new file mode 100644 index 00000000000..9176ee5c3a1 --- /dev/null +++ b/.changelog/1d9ad6a14d4d4f9ba56a0995cb57e2c6.json @@ -0,0 +1,8 @@ +{ + "id": "1d9ad6a1-4d4d-4f9b-a56a-0995cb57e2c6", + "type": "feature", + "description": "This release adds support for Enhanced Monitoring and Performance Insights when restoring Aurora Limitless Database DB clusters. It also adds support for the os-upgrade pending maintenance action.", + "modules": [ + "service/rds" + ] +} \ No newline at end of file diff --git a/.changelog/35494d93c42c40a3a69a115a768e4941.json b/.changelog/35494d93c42c40a3a69a115a768e4941.json new file mode 100644 index 00000000000..f7fdd2aefc7 --- /dev/null +++ b/.changelog/35494d93c42c40a3a69a115a768e4941.json @@ -0,0 +1,8 @@ +{ + "id": "35494d93-c42c-40a3-a69a-115a768e4941", + "type": "feature", + "description": "Adds support for provisioning dedicated coordinator nodes. Coordinator nodes can be specified using the new NodeOptions parameter in ClusterConfig.", + "modules": [ + "service/opensearch" + ] +} \ No newline at end of file diff --git a/.changelog/5b4fbaf3da264be482b71a9dfe5376a3.json b/.changelog/5b4fbaf3da264be482b71a9dfe5376a3.json new file mode 100644 index 00000000000..5e19fede6a8 --- /dev/null +++ b/.changelog/5b4fbaf3da264be482b71a9dfe5376a3.json @@ -0,0 +1,8 @@ +{ + "id": "5b4fbaf3-da26-4be4-82b7-1a9dfe5376a3", + "type": "feature", + "description": "MediaPackage V2 Live to VOD Harvester is a MediaPackage V2 feature, which is used to export content from an origin endpoint to a S3 bucket.", + "modules": [ + "service/mediapackagev2" + ] +} \ No newline at end of file diff --git a/.changelog/ba34b942d4164911a14d256657a2ee8f.json b/.changelog/ba34b942d4164911a14d256657a2ee8f.json new file mode 100644 index 00000000000..72a041e528e --- /dev/null +++ b/.changelog/ba34b942d4164911a14d256657a2ee8f.json @@ -0,0 +1,8 @@ +{ + "id": "ba34b942-d416-4911-a14d-256657a2ee8f", + "type": "documentation", + "description": "Documentation update: Amazon FSx File Gateway will no longer be available to new customers.", + "modules": [ + "service/storagegateway" + ] +} \ No newline at end of file diff --git a/feature/dynamodbstreams/attributevalue/go_module_metadata.go b/feature/dynamodbstreams/attributevalue/go_module_metadata.go index 63b8da63e8e..7a98a199477 100644 --- a/feature/dynamodbstreams/attributevalue/go_module_metadata.go +++ b/feature/dynamodbstreams/attributevalue/go_module_metadata.go @@ -3,4 +3,4 @@ package attributevalue // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.14.43" +const goModuleVersion = "1.15.12" diff --git a/service/mediapackagev2/api_op_CancelHarvestJob.go b/service/mediapackagev2/api_op_CancelHarvestJob.go new file mode 100644 index 00000000000..74cbe48e822 --- /dev/null +++ b/service/mediapackagev2/api_op_CancelHarvestJob.go @@ -0,0 +1,174 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package mediapackagev2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Cancels an in-progress harvest job. 
+func (c *Client) CancelHarvestJob(ctx context.Context, params *CancelHarvestJobInput, optFns ...func(*Options)) (*CancelHarvestJobOutput, error) { + if params == nil { + params = &CancelHarvestJobInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CancelHarvestJob", params, optFns, c.addOperationCancelHarvestJobMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CancelHarvestJobOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CancelHarvestJobInput struct { + + // The name of the channel group containing the channel from which the harvest job + // is running. + // + // This member is required. + ChannelGroupName *string + + // The name of the channel from which the harvest job is running. + // + // This member is required. + ChannelName *string + + // The name of the harvest job to cancel. This name must be unique within the + // channel and cannot be changed after the harvest job is submitted. + // + // This member is required. + HarvestJobName *string + + // The name of the origin endpoint that the harvest job is harvesting from. This + // cannot be changed after the harvest job is submitted. + // + // This member is required. + OriginEndpointName *string + + // The current Entity Tag (ETag) associated with the harvest job. Used for + // concurrency control. + ETag *string + + noSmithyDocumentSerde +} + +type CancelHarvestJobOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCancelHarvestJobMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpCancelHarvestJob{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCancelHarvestJob{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CancelHarvestJob"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpCancelHarvestJobValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opCancelHarvestJob(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCancelHarvestJob(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CancelHarvestJob", + } +} diff --git a/service/mediapackagev2/api_op_CreateHarvestJob.go b/service/mediapackagev2/api_op_CreateHarvestJob.go new file mode 100644 index 00000000000..2c1819bd8ae --- /dev/null +++ b/service/mediapackagev2/api_op_CreateHarvestJob.go @@ -0,0 +1,304 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package mediapackagev2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/mediapackagev2/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Creates a new harvest job to export content from a MediaPackage v2 channel to +// an S3 bucket. +func (c *Client) CreateHarvestJob(ctx context.Context, params *CreateHarvestJobInput, optFns ...func(*Options)) (*CreateHarvestJobOutput, error) { + if params == nil { + params = &CreateHarvestJobInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateHarvestJob", params, optFns, c.addOperationCreateHarvestJobMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateHarvestJobOutput) + out.ResultMetadata = metadata + return out, nil +} + +// The request object for creating a new harvest job. +type CreateHarvestJobInput struct { + + // The name of the channel group containing the channel from which to harvest + // content. + // + // This member is required. + ChannelGroupName *string + + // The name of the channel from which to harvest content. + // + // This member is required. + ChannelName *string + + // The S3 destination where the harvested content will be placed. + // + // This member is required. + Destination *types.Destination + + // A list of manifests to be harvested. + // + // This member is required. + HarvestedManifests *types.HarvestedManifests + + // The name of the origin endpoint from which to harvest content. + // + // This member is required. + OriginEndpointName *string + + // The configuration for when the harvest job should run, including start and end + // times. + // + // This member is required. + ScheduleConfiguration *types.HarvesterScheduleConfiguration + + // A unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. + ClientToken *string + + // An optional description for the harvest job. + Description *string + + // A name for the harvest job. This name must be unique within the channel. 
+ HarvestJobName *string + + // A collection of tags associated with the harvest job. + Tags map[string]string + + noSmithyDocumentSerde +} + +// The response object returned after creating a harvest job. +type CreateHarvestJobOutput struct { + + // The Amazon Resource Name (ARN) of the created harvest job. + // + // This member is required. + Arn *string + + // The name of the channel group containing the channel from which content is + // being harvested. + // + // This member is required. + ChannelGroupName *string + + // The name of the channel from which content is being harvested. + // + // This member is required. + ChannelName *string + + // The date and time the harvest job was created. + // + // This member is required. + CreatedAt *time.Time + + // The S3 destination where the harvested content will be placed. + // + // This member is required. + Destination *types.Destination + + // The name of the created harvest job. + // + // This member is required. + HarvestJobName *string + + // A list of manifests that will be harvested. + // + // This member is required. + HarvestedManifests *types.HarvestedManifests + + // The date and time the harvest job was last modified. + // + // This member is required. + ModifiedAt *time.Time + + // The name of the origin endpoint from which content is being harvested. + // + // This member is required. + OriginEndpointName *string + + // The configuration for when the harvest job will run, including start and end + // times. + // + // This member is required. + ScheduleConfiguration *types.HarvesterScheduleConfiguration + + // The current status of the harvest job (e.g., CREATED, IN_PROGRESS, ABORTED, + // COMPLETED, FAILED). + // + // This member is required. + Status types.HarvestJobStatus + + // The description of the harvest job, if provided. + Description *string + + // The current version of the harvest job. Used for concurrency control. + ETag *string + + // An error message if the harvest job creation failed. + ErrorMessage *string + + // A collection of tags associated with the harvest job. + Tags map[string]string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateHarvestJobMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateHarvestJob{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateHarvestJob{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateHarvestJob"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIdempotencyToken_opCreateHarvestJobMiddleware(stack, options); err != nil { + return err + } + if err = addOpCreateHarvestJobValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateHarvestJob(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpCreateHarvestJob struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpCreateHarvestJob) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpCreateHarvestJob) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return 
next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*CreateHarvestJobInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateHarvestJobInput ") + } + + if input.ClientToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opCreateHarvestJobMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpCreateHarvestJob{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opCreateHarvestJob(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateHarvestJob", + } +} diff --git a/service/mediapackagev2/api_op_GetHarvestJob.go b/service/mediapackagev2/api_op_GetHarvestJob.go new file mode 100644 index 00000000000..dcc6b6f7791 --- /dev/null +++ b/service/mediapackagev2/api_op_GetHarvestJob.go @@ -0,0 +1,500 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package mediapackagev2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/mediapackagev2/types" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + smithywaiter "github.com/aws/smithy-go/waiter" + jmespath "github.com/jmespath/go-jmespath" + "time" +) + +// Retrieves the details of a specific harvest job. +func (c *Client) GetHarvestJob(ctx context.Context, params *GetHarvestJobInput, optFns ...func(*Options)) (*GetHarvestJobOutput, error) { + if params == nil { + params = &GetHarvestJobInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetHarvestJob", params, optFns, c.addOperationGetHarvestJobMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetHarvestJobOutput) + out.ResultMetadata = metadata + return out, nil +} + +// The request object for retrieving a specific harvest job. +type GetHarvestJobInput struct { + + // The name of the channel group containing the channel associated with the + // harvest job. + // + // This member is required. + ChannelGroupName *string + + // The name of the channel associated with the harvest job. + // + // This member is required. + ChannelName *string + + // The name of the harvest job to retrieve. + // + // This member is required. + HarvestJobName *string + + // The name of the origin endpoint associated with the harvest job. + // + // This member is required. + OriginEndpointName *string + + noSmithyDocumentSerde +} + +// The response object containing the details of the requested harvest job. +type GetHarvestJobOutput struct { + + // The Amazon Resource Name (ARN) of the harvest job. + // + // This member is required. + Arn *string + + // The name of the channel group containing the channel associated with the + // harvest job. + // + // This member is required. + ChannelGroupName *string + + // The name of the channel associated with the harvest job. + // + // This member is required. + ChannelName *string + + // The date and time when the harvest job was created. + // + // This member is required. + CreatedAt *time.Time + + // The S3 destination where the harvested content is being placed. + // + // This member is required. 
+ Destination *types.Destination + + // The name of the harvest job. + // + // This member is required. + HarvestJobName *string + + // A list of manifests that are being or have been harvested. + // + // This member is required. + HarvestedManifests *types.HarvestedManifests + + // The date and time when the harvest job was last modified. + // + // This member is required. + ModifiedAt *time.Time + + // The name of the origin endpoint associated with the harvest job. + // + // This member is required. + OriginEndpointName *string + + // The configuration for when the harvest job is scheduled to run, including start + // and end times. + // + // This member is required. + ScheduleConfiguration *types.HarvesterScheduleConfiguration + + // The current status of the harvest job (e.g., QUEUED, IN_PROGRESS, CANCELLED, + // COMPLETED, FAILED). + // + // This member is required. + Status types.HarvestJobStatus + + // The description of the harvest job, if provided. + Description *string + + // The current version of the harvest job. Used for concurrency control. + ETag *string + + // An error message if the harvest job encountered any issues. + ErrorMessage *string + + // A collection of tags associated with the harvest job. + Tags map[string]string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetHarvestJobMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetHarvestJob{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetHarvestJob{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetHarvestJob"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpGetHarvestJobValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetHarvestJob(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); 
err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// HarvestJobFinishedWaiterOptions are waiter options for HarvestJobFinishedWaiter +type HarvestJobFinishedWaiterOptions struct { + + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. + APIOptions []func(*middleware.Stack) error + + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + + // MinDelay is the minimum amount of time to delay between retries. If unset, + // HarvestJobFinishedWaiter will use default minimum delay of 2 seconds. Note that + // MinDelay must resolve to a value lesser than or equal to the MaxDelay. + MinDelay time.Duration + + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, HarvestJobFinishedWaiter will use default max delay of 120 seconds. + // Note that MaxDelay must resolve to value greater than or equal to the MinDelay. + MaxDelay time.Duration + + // LogWaitAttempts is used to enable logging for waiter retry attempts + LogWaitAttempts bool + + // Retryable is function that can be used to override the service defined + // waiter-behavior based on operation output, or returned error. This function is + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. + Retryable func(context.Context, *GetHarvestJobInput, *GetHarvestJobOutput, error) (bool, error) +} + +// HarvestJobFinishedWaiter defines the waiters for HarvestJobFinished +type HarvestJobFinishedWaiter struct { + client GetHarvestJobAPIClient + + options HarvestJobFinishedWaiterOptions +} + +// NewHarvestJobFinishedWaiter constructs a HarvestJobFinishedWaiter. 
+func NewHarvestJobFinishedWaiter(client GetHarvestJobAPIClient, optFns ...func(*HarvestJobFinishedWaiterOptions)) *HarvestJobFinishedWaiter { + options := HarvestJobFinishedWaiterOptions{} + options.MinDelay = 2 * time.Second + options.MaxDelay = 120 * time.Second + options.Retryable = harvestJobFinishedStateRetryable + + for _, fn := range optFns { + fn(&options) + } + return &HarvestJobFinishedWaiter{ + client: client, + options: options, + } +} + +// Wait calls the waiter function for HarvestJobFinished waiter. The maxWaitDur is +// the maximum wait duration the waiter will wait. The maxWaitDur is required and +// must be greater than zero. +func (w *HarvestJobFinishedWaiter) Wait(ctx context.Context, params *GetHarvestJobInput, maxWaitDur time.Duration, optFns ...func(*HarvestJobFinishedWaiterOptions)) error { + _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) + return err +} + +// WaitForOutput calls the waiter function for HarvestJobFinished waiter and +// returns the output of the successful operation. The maxWaitDur is the maximum +// wait duration the waiter will wait. The maxWaitDur is required and must be +// greater than zero. +func (w *HarvestJobFinishedWaiter) WaitForOutput(ctx context.Context, params *GetHarvestJobInput, maxWaitDur time.Duration, optFns ...func(*HarvestJobFinishedWaiterOptions)) (*GetHarvestJobOutput, error) { + if maxWaitDur <= 0 { + return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero") + } + + options := w.options + for _, fn := range optFns { + fn(&options) + } + + if options.MaxDelay <= 0 { + options.MaxDelay = 120 * time.Second + } + + if options.MinDelay > options.MaxDelay { + return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay) + } + + ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur) + defer cancelFn() + + logger := smithywaiter.Logger{} + remainingTime := maxWaitDur + + var attempt int64 + for { + + attempt++ + apiOptions := options.APIOptions + start := time.Now() + + if options.LogWaitAttempts { + logger.Attempt = attempt + apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...) + apiOptions = append(apiOptions, logger.AddLogger) + } + + out, err := w.client.GetHarvestJob(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } + o.APIOptions = append(o.APIOptions, apiOptions...) 
+ for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } + }) + + retryable, err := options.Retryable(ctx, params, out, err) + if err != nil { + return nil, err + } + if !retryable { + return out, nil + } + + remainingTime -= time.Since(start) + if remainingTime < options.MinDelay || remainingTime <= 0 { + break + } + + // compute exponential backoff between waiter retries + delay, err := smithywaiter.ComputeDelay( + attempt, options.MinDelay, options.MaxDelay, remainingTime, + ) + if err != nil { + return nil, fmt.Errorf("error computing waiter delay, %w", err) + } + + remainingTime -= delay + // sleep for the delay amount before invoking a request + if err := smithytime.SleepWithContext(ctx, delay); err != nil { + return nil, fmt.Errorf("request cancelled while waiting, %w", err) + } + } + return nil, fmt.Errorf("exceeded max wait time for HarvestJobFinished waiter") +} + +func harvestJobFinishedStateRetryable(ctx context.Context, input *GetHarvestJobInput, output *GetHarvestJobOutput, err error) (bool, error) { + + if err == nil { + pathValue, err := jmespath.Search("Status", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "COMPLETED" + value, ok := pathValue.(types.HarvestJobStatus) + if !ok { + return false, fmt.Errorf("waiter comparator expected types.HarvestJobStatus value, got %T", pathValue) + } + + if string(value) == expectedValue { + return false, nil + } + } + + if err == nil { + pathValue, err := jmespath.Search("Status", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "CANCELLED" + value, ok := pathValue.(types.HarvestJobStatus) + if !ok { + return false, fmt.Errorf("waiter comparator expected types.HarvestJobStatus value, got %T", pathValue) + } + + if string(value) == expectedValue { + return false, nil + } + } + + if err == nil { + pathValue, err := jmespath.Search("Status", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "FAILED" + value, ok := pathValue.(types.HarvestJobStatus) + if !ok { + return false, fmt.Errorf("waiter comparator expected types.HarvestJobStatus value, got %T", pathValue) + } + + if string(value) == expectedValue { + return false, fmt.Errorf("waiter state transitioned to Failure") + } + } + + if err == nil { + pathValue, err := jmespath.Search("Status", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "QUEUED" + value, ok := pathValue.(types.HarvestJobStatus) + if !ok { + return false, fmt.Errorf("waiter comparator expected types.HarvestJobStatus value, got %T", pathValue) + } + + if string(value) == expectedValue { + return true, nil + } + } + + if err == nil { + pathValue, err := jmespath.Search("Status", output) + if err != nil { + return false, fmt.Errorf("error evaluating waiter state: %w", err) + } + + expectedValue := "IN_PROGRESS" + value, ok := pathValue.(types.HarvestJobStatus) + if !ok { + return false, fmt.Errorf("waiter comparator expected types.HarvestJobStatus value, got %T", pathValue) + } + + if string(value) == expectedValue { + return true, nil + } + } + + return true, nil +} + +// GetHarvestJobAPIClient is a client that implements the GetHarvestJob operation. 
+type GetHarvestJobAPIClient interface { + GetHarvestJob(context.Context, *GetHarvestJobInput, ...func(*Options)) (*GetHarvestJobOutput, error) +} + +var _ GetHarvestJobAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opGetHarvestJob(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetHarvestJob", + } +} diff --git a/service/mediapackagev2/api_op_ListHarvestJobs.go b/service/mediapackagev2/api_op_ListHarvestJobs.go new file mode 100644 index 00000000000..c016117adb8 --- /dev/null +++ b/service/mediapackagev2/api_op_ListHarvestJobs.go @@ -0,0 +1,280 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package mediapackagev2 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/mediapackagev2/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves a list of harvest jobs that match the specified criteria. +func (c *Client) ListHarvestJobs(ctx context.Context, params *ListHarvestJobsInput, optFns ...func(*Options)) (*ListHarvestJobsOutput, error) { + if params == nil { + params = &ListHarvestJobsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListHarvestJobs", params, optFns, c.addOperationListHarvestJobsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListHarvestJobsOutput) + out.ResultMetadata = metadata + return out, nil +} + +// The request object for listing harvest jobs. +type ListHarvestJobsInput struct { + + // The name of the channel group to filter the harvest jobs by. If specified, only + // harvest jobs associated with channels in this group will be returned. + // + // This member is required. + ChannelGroupName *string + + // The name of the channel to filter the harvest jobs by. If specified, only + // harvest jobs associated with this channel will be returned. + ChannelName *string + + // The maximum number of harvest jobs to return in a single request. If not + // specified, a default value will be used. + MaxResults *int32 + + // A token used for pagination. Provide this value in subsequent requests to + // retrieve the next set of results. + NextToken *string + + // The name of the origin endpoint to filter the harvest jobs by. If specified, + // only harvest jobs associated with this origin endpoint will be returned. + OriginEndpointName *string + + // The status to filter the harvest jobs by. If specified, only harvest jobs with + // this status will be returned. + Status types.HarvestJobStatus + + noSmithyDocumentSerde +} + +// The response object containing the list of harvest jobs that match the +// specified criteria. +type ListHarvestJobsOutput struct { + + // An array of harvest job objects that match the specified criteria. + Items []types.HarvestJob + + // A token used for pagination. Include this value in subsequent requests to + // retrieve the next set of results. If null, there are no more results to + // retrieve. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListHarvestJobsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpListHarvestJobs{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListHarvestJobs{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListHarvestJobs"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpListHarvestJobsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListHarvestJobs(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListHarvestJobsPaginatorOptions is the paginator options for ListHarvestJobs +type ListHarvestJobsPaginatorOptions struct { + // The maximum number of harvest jobs to return in a single request. If not + // specified, a default value will be used. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListHarvestJobsPaginator is a paginator for ListHarvestJobs +type ListHarvestJobsPaginator struct { + options ListHarvestJobsPaginatorOptions + client ListHarvestJobsAPIClient + params *ListHarvestJobsInput + nextToken *string + firstPage bool +} + +// NewListHarvestJobsPaginator returns a new ListHarvestJobsPaginator +func NewListHarvestJobsPaginator(client ListHarvestJobsAPIClient, params *ListHarvestJobsInput, optFns ...func(*ListHarvestJobsPaginatorOptions)) *ListHarvestJobsPaginator { + if params == nil { + params = &ListHarvestJobsInput{} + } + + options := ListHarvestJobsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListHarvestJobsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListHarvestJobsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListHarvestJobs page. +func (p *ListHarvestJobsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListHarvestJobsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListHarvestJobs(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListHarvestJobsAPIClient is a client that implements the ListHarvestJobs +// operation. 
+type ListHarvestJobsAPIClient interface { + ListHarvestJobs(context.Context, *ListHarvestJobsInput, ...func(*Options)) (*ListHarvestJobsOutput, error) +} + +var _ ListHarvestJobsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListHarvestJobs(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListHarvestJobs", + } +} diff --git a/service/mediapackagev2/deserializers.go b/service/mediapackagev2/deserializers.go index 4cdf4141a9a..af08f4ea784 100644 --- a/service/mediapackagev2/deserializers.go +++ b/service/mediapackagev2/deserializers.go @@ -31,6 +31,109 @@ func deserializeS3Expires(v string) (*time.Time, error) { return &t, nil } +type awsRestjson1_deserializeOpCancelHarvestJob struct { +} + +func (*awsRestjson1_deserializeOpCancelHarvestJob) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCancelHarvestJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCancelHarvestJob(response, &metadata) + } + output := &CancelHarvestJobOutput{} + out.Result = output + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorCancelHarvestJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return 
awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsRestjson1_deserializeOpCreateChannel struct { } @@ -539,14 +642,14 @@ func awsRestjson1_deserializeOpDocumentCreateChannelGroupOutput(v **CreateChanne return nil } -type awsRestjson1_deserializeOpCreateOriginEndpoint struct { +type awsRestjson1_deserializeOpCreateHarvestJob struct { } -func (*awsRestjson1_deserializeOpCreateOriginEndpoint) ID() string { +func (*awsRestjson1_deserializeOpCreateHarvestJob) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpCreateOriginEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpCreateHarvestJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -564,9 +667,9 @@ func (m *awsRestjson1_deserializeOpCreateOriginEndpoint) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorCreateOriginEndpoint(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorCreateHarvestJob(response, &metadata) } - output := &CreateOriginEndpointOutput{} + output := &CreateHarvestJobOutput{} out.Result = output var buff [1024]byte @@ -587,7 +690,7 @@ func (m *awsRestjson1_deserializeOpCreateOriginEndpoint) HandleDeserialize(ctx c return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentCreateOriginEndpointOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentCreateHarvestJobOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -601,7 +704,7 @@ func (m *awsRestjson1_deserializeOpCreateOriginEndpoint) HandleDeserialize(ctx c return out, metadata, err } -func awsRestjson1_deserializeOpErrorCreateOriginEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorCreateHarvestJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -673,7 +776,7 @@ func awsRestjson1_deserializeOpErrorCreateOriginEndpoint(response *smithyhttp.Re } } -func awsRestjson1_deserializeOpDocumentCreateOriginEndpointOutput(v **CreateOriginEndpointOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentCreateHarvestJobOutput(v **CreateHarvestJobOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -686,9 +789,9 @@ func awsRestjson1_deserializeOpDocumentCreateOriginEndpointOutput(v **CreateOrig return fmt.Errorf("unexpected JSON type %v", 
value) } - var sv *CreateOriginEndpointOutput + var sv *CreateHarvestJobOutput if *v == nil { - sv = &CreateOriginEndpointOutput{} + sv = &CreateHarvestJobOutput{} } else { sv = *v } @@ -722,15 +825,6 @@ func awsRestjson1_deserializeOpDocumentCreateOriginEndpointOutput(v **CreateOrig sv.ChannelName = ptr.String(jtv) } - case "ContainerType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ContainerType to be of type string, got %T instead", value) - } - sv.ContainerType = types.ContainerType(jtv) - } - case "CreatedAt": if value != nil { switch jtv := value.(type) { @@ -747,11 +841,6 @@ func awsRestjson1_deserializeOpDocumentCreateOriginEndpointOutput(v **CreateOrig } } - case "DashManifests": - if err := awsRestjson1_deserializeDocumentGetDashManifests(&sv.DashManifests, value); err != nil { - return err - } - case "Description": if value != nil { jtv, ok := value.(string) @@ -761,6 +850,20 @@ func awsRestjson1_deserializeOpDocumentCreateOriginEndpointOutput(v **CreateOrig sv.Description = ptr.String(jtv) } + case "Destination": + if err := awsRestjson1_deserializeDocumentDestination(&sv.Destination, value); err != nil { + return err + } + + case "ErrorMessage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ErrorMessage = ptr.String(jtv) + } + case "ETag": if value != nil { jtv, ok := value.(string) @@ -770,19 +873,18 @@ func awsRestjson1_deserializeOpDocumentCreateOriginEndpointOutput(v **CreateOrig sv.ETag = ptr.String(jtv) } - case "ForceEndpointErrorConfiguration": - if err := awsRestjson1_deserializeDocumentForceEndpointErrorConfiguration(&sv.ForceEndpointErrorConfiguration, value); err != nil { - return err - } - - case "HlsManifests": - if err := awsRestjson1_deserializeDocumentGetHlsManifests(&sv.HlsManifests, value); err != nil { + case "HarvestedManifests": + if err := awsRestjson1_deserializeDocumentHarvestedManifests(&sv.HarvestedManifests, value); err != nil { return err } - case "LowLatencyHlsManifests": - if err := awsRestjson1_deserializeDocumentGetLowLatencyHlsManifests(&sv.LowLatencyHlsManifests, value); err != nil { - return err + case "HarvestJobName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) + } + sv.HarvestJobName = ptr.String(jtv) } case "ModifiedAt": @@ -810,22 +912,18 @@ func awsRestjson1_deserializeOpDocumentCreateOriginEndpointOutput(v **CreateOrig sv.OriginEndpointName = ptr.String(jtv) } - case "Segment": - if err := awsRestjson1_deserializeDocumentSegment(&sv.Segment, value); err != nil { + case "ScheduleConfiguration": + if err := awsRestjson1_deserializeDocumentHarvesterScheduleConfiguration(&sv.ScheduleConfiguration, value); err != nil { return err } - case "StartoverWindowSeconds": + case "Status": if value != nil { - jtv, ok := value.(json.Number) + jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + return fmt.Errorf("expected HarvestJobStatus to be of type string, got %T instead", value) } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.StartoverWindowSeconds = ptr.Int32(int32(i64)) + sv.Status = types.HarvestJobStatus(jtv) } case "Tags": @@ -842,14 +940,14 @@ func awsRestjson1_deserializeOpDocumentCreateOriginEndpointOutput(v **CreateOrig return nil } -type awsRestjson1_deserializeOpDeleteChannel struct { +type 
awsRestjson1_deserializeOpCreateOriginEndpoint struct { } -func (*awsRestjson1_deserializeOpDeleteChannel) ID() string { +func (*awsRestjson1_deserializeOpCreateOriginEndpoint) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpDeleteChannel) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpCreateOriginEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -867,16 +965,44 @@ func (m *awsRestjson1_deserializeOpDeleteChannel) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorDeleteChannel(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorCreateOriginEndpoint(response, &metadata) } - output := &DeleteChannelOutput{} + output := &CreateOriginEndpointOutput{} out.Result = output + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentCreateOriginEndpointOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + span.End() return out, metadata, err } -func awsRestjson1_deserializeOpErrorDeleteChannel(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorCreateOriginEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -926,6 +1052,12 @@ func awsRestjson1_deserializeOpErrorDeleteChannel(response *smithyhttp.Response, case strings.EqualFold("InternalServerException", errorCode): return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) + case strings.EqualFold("ThrottlingException", errorCode): return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) @@ -942,114 +1074,183 @@ func awsRestjson1_deserializeOpErrorDeleteChannel(response *smithyhttp.Response, } } -type awsRestjson1_deserializeOpDeleteChannelGroup struct { -} - -func (*awsRestjson1_deserializeOpDeleteChannelGroup) ID() string { - return "OperationDeserializer" -} - -func (m *awsRestjson1_deserializeOpDeleteChannelGroup) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err +func awsRestjson1_deserializeOpDocumentCreateOriginEndpointOutput(v **CreateOriginEndpointOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil } - _, span := tracing.StartSpan(ctx, "OperationDeserializer") - endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") - defer endTimer() - defer span.End() - response, ok := out.RawResponse.(*smithyhttp.Response) + shape, ok := value.(map[string]interface{}) if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + return fmt.Errorf("unexpected JSON type %v", value) } - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorDeleteChannelGroup(response, &metadata) + var sv *CreateOriginEndpointOutput + if *v == nil { + sv = &CreateOriginEndpointOutput{} + } else { + sv = *v } - output := &DeleteChannelGroupOutput{} - out.Result = output - span.End() - return out, metadata, err -} + for key, value := range shape { + switch key { + case "Arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } -func awsRestjson1_deserializeOpErrorDeleteChannelGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) + case "ChannelGroupName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) + } + sv.ChannelGroupName = ptr.String(jtv) + } - errorCode := "UnknownError" - errorMessage := errorCode + case "ChannelName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) + } + sv.ChannelName = ptr.String(jtv) + } - headerCode := response.Header.Get("X-Amzn-ErrorType") - if len(headerCode) != 0 { - errorCode = restjson.SanitizeErrorCode(headerCode) - } + case "ContainerType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ContainerType to be of type string, got %T instead", value) + } + sv.ContainerType = types.ContainerType(jtv) + } - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) + case "CreatedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - jsonCode, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, 
got %T instead", value) - errorBody.Seek(0, io.SeekStart) - if len(headerCode) == 0 && len(jsonCode) != 0 { - errorCode = restjson.SanitizeErrorCode(jsonCode) - } - if len(message) != 0 { - errorMessage = message - } + } + } - switch { - case strings.EqualFold("AccessDeniedException", errorCode): - return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + case "DashManifests": + if err := awsRestjson1_deserializeDocumentGetDashManifests(&sv.DashManifests, value); err != nil { + return err + } - case strings.EqualFold("ConflictException", errorCode): - return awsRestjson1_deserializeErrorConflictException(response, errorBody) + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceDescription to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } - case strings.EqualFold("InternalServerException", errorCode): - return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + case "ETag": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EntityTag to be of type string, got %T instead", value) + } + sv.ETag = ptr.String(jtv) + } - case strings.EqualFold("ThrottlingException", errorCode): - return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + case "ForceEndpointErrorConfiguration": + if err := awsRestjson1_deserializeDocumentForceEndpointErrorConfiguration(&sv.ForceEndpointErrorConfiguration, value); err != nil { + return err + } - case strings.EqualFold("ValidationException", errorCode): - return awsRestjson1_deserializeErrorValidationException(response, errorBody) + case "HlsManifests": + if err := awsRestjson1_deserializeDocumentGetHlsManifests(&sv.HlsManifests, value); err != nil { + return err + } - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError + case "LowLatencyHlsManifests": + if err := awsRestjson1_deserializeDocumentGetLowLatencyHlsManifests(&sv.LowLatencyHlsManifests, value); err != nil { + return err + } + + case "ModifiedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ModifiedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "OriginEndpointName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) + } + sv.OriginEndpointName = ptr.String(jtv) + } + + case "Segment": + if err := awsRestjson1_deserializeDocumentSegment(&sv.Segment, value); err != nil { + return err + } + + case "StartoverWindowSeconds": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StartoverWindowSeconds = ptr.Int32(int32(i64)) + } + + case "Tags": + if err := awsRestjson1_deserializeDocumentTagMap(&sv.Tags, value); err != nil { + return err + } + default: + _, _ = key, value + + } } + *v = sv + return nil } -type awsRestjson1_deserializeOpDeleteChannelPolicy struct { +type awsRestjson1_deserializeOpDeleteChannel struct { } -func (*awsRestjson1_deserializeOpDeleteChannelPolicy) ID() string { +func (*awsRestjson1_deserializeOpDeleteChannel) ID() string { return 
"OperationDeserializer" } -func (m *awsRestjson1_deserializeOpDeleteChannelPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpDeleteChannel) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -1067,16 +1268,16 @@ func (m *awsRestjson1_deserializeOpDeleteChannelPolicy) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorDeleteChannelPolicy(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorDeleteChannel(response, &metadata) } - output := &DeleteChannelPolicyOutput{} + output := &DeleteChannelOutput{} out.Result = output span.End() return out, metadata, err } -func awsRestjson1_deserializeOpErrorDeleteChannelPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorDeleteChannel(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -1142,14 +1343,14 @@ func awsRestjson1_deserializeOpErrorDeleteChannelPolicy(response *smithyhttp.Res } } -type awsRestjson1_deserializeOpDeleteOriginEndpoint struct { +type awsRestjson1_deserializeOpDeleteChannelGroup struct { } -func (*awsRestjson1_deserializeOpDeleteOriginEndpoint) ID() string { +func (*awsRestjson1_deserializeOpDeleteChannelGroup) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpDeleteOriginEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpDeleteChannelGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -1167,16 +1368,16 @@ func (m *awsRestjson1_deserializeOpDeleteOriginEndpoint) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorDeleteOriginEndpoint(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorDeleteChannelGroup(response, &metadata) } - output := &DeleteOriginEndpointOutput{} + output := &DeleteChannelGroupOutput{} out.Result = output span.End() return out, metadata, err } -func awsRestjson1_deserializeOpErrorDeleteOriginEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorDeleteChannelGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -1220,6 +1421,9 @@ func awsRestjson1_deserializeOpErrorDeleteOriginEndpoint(response *smithyhttp.Re case strings.EqualFold("AccessDeniedException", errorCode): return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + case strings.EqualFold("ConflictException", errorCode): + return 
awsRestjson1_deserializeErrorConflictException(response, errorBody) + case strings.EqualFold("InternalServerException", errorCode): return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) @@ -1239,14 +1443,14 @@ func awsRestjson1_deserializeOpErrorDeleteOriginEndpoint(response *smithyhttp.Re } } -type awsRestjson1_deserializeOpDeleteOriginEndpointPolicy struct { +type awsRestjson1_deserializeOpDeleteChannelPolicy struct { } -func (*awsRestjson1_deserializeOpDeleteOriginEndpointPolicy) ID() string { +func (*awsRestjson1_deserializeOpDeleteChannelPolicy) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpDeleteOriginEndpointPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpDeleteChannelPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -1264,16 +1468,16 @@ func (m *awsRestjson1_deserializeOpDeleteOriginEndpointPolicy) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorDeleteOriginEndpointPolicy(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorDeleteChannelPolicy(response, &metadata) } - output := &DeleteOriginEndpointPolicyOutput{} + output := &DeleteChannelPolicyOutput{} out.Result = output span.End() return out, metadata, err } -func awsRestjson1_deserializeOpErrorDeleteOriginEndpointPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorDeleteChannelPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -1339,14 +1543,14 @@ func awsRestjson1_deserializeOpErrorDeleteOriginEndpointPolicy(response *smithyh } } -type awsRestjson1_deserializeOpGetChannel struct { +type awsRestjson1_deserializeOpDeleteOriginEndpoint struct { } -func (*awsRestjson1_deserializeOpGetChannel) ID() string { +func (*awsRestjson1_deserializeOpDeleteOriginEndpoint) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetChannel) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpDeleteOriginEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -1364,44 +1568,16 @@ func (m *awsRestjson1_deserializeOpGetChannel) HandleDeserialize(ctx context.Con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetChannel(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorDeleteOriginEndpoint(response, &metadata) } - output := &GetChannelOutput{} + output := &DeleteOriginEndpointOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - - decoder := json.NewDecoder(body) - decoder.UseNumber() - var 
shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsRestjson1_deserializeOpDocumentGetChannelOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), - Snapshot: snapshot.Bytes(), - } - } - span.End() return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetChannel(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorDeleteOriginEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -1448,9 +1624,6 @@ func awsRestjson1_deserializeOpErrorGetChannel(response *smithyhttp.Response, me case strings.EqualFold("InternalServerException", errorCode): return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - case strings.EqualFold("ThrottlingException", errorCode): return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) @@ -1467,10 +1640,238 @@ func awsRestjson1_deserializeOpErrorGetChannel(response *smithyhttp.Response, me } } -func awsRestjson1_deserializeOpDocumentGetChannelOutput(v **GetChannelOutput, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } +type awsRestjson1_deserializeOpDeleteOriginEndpointPolicy struct { +} + +func (*awsRestjson1_deserializeOpDeleteOriginEndpointPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpDeleteOriginEndpointPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorDeleteOriginEndpointPolicy(response, &metadata) + } + output := &DeleteOriginEndpointPolicyOutput{} + out.Result = output + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorDeleteOriginEndpointPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := 
"UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestjson1_deserializeOpGetChannel struct { +} + +func (*awsRestjson1_deserializeOpGetChannel) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetChannel) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetChannel(response, &metadata) + } + output := &GetChannelOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetChannelOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid 
JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetChannel(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentGetChannelOutput(v **GetChannelOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } if value == nil { return nil } @@ -2021,14 +2422,14 @@ func awsRestjson1_deserializeOpDocumentGetChannelPolicyOutput(v **GetChannelPoli return nil } -type awsRestjson1_deserializeOpGetOriginEndpoint struct { +type awsRestjson1_deserializeOpGetHarvestJob struct { } -func (*awsRestjson1_deserializeOpGetOriginEndpoint) ID() string { +func (*awsRestjson1_deserializeOpGetHarvestJob) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetOriginEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetHarvestJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -2046,9 +2447,9 @@ func (m *awsRestjson1_deserializeOpGetOriginEndpoint) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetOriginEndpoint(response, &metadata) + return out, 
metadata, awsRestjson1_deserializeOpErrorGetHarvestJob(response, &metadata) } - output := &GetOriginEndpointOutput{} + output := &GetHarvestJobOutput{} out.Result = output var buff [1024]byte @@ -2069,7 +2470,7 @@ func (m *awsRestjson1_deserializeOpGetOriginEndpoint) HandleDeserialize(ctx cont return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentGetOriginEndpointOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentGetHarvestJobOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2083,7 +2484,7 @@ func (m *awsRestjson1_deserializeOpGetOriginEndpoint) HandleDeserialize(ctx cont return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetOriginEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetHarvestJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -2149,7 +2550,7 @@ func awsRestjson1_deserializeOpErrorGetOriginEndpoint(response *smithyhttp.Respo } } -func awsRestjson1_deserializeOpDocumentGetOriginEndpointOutput(v **GetOriginEndpointOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetHarvestJobOutput(v **GetHarvestJobOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -2162,9 +2563,9 @@ func awsRestjson1_deserializeOpDocumentGetOriginEndpointOutput(v **GetOriginEndp return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetOriginEndpointOutput + var sv *GetHarvestJobOutput if *v == nil { - sv = &GetOriginEndpointOutput{} + sv = &GetHarvestJobOutput{} } else { sv = *v } @@ -2198,15 +2599,6 @@ func awsRestjson1_deserializeOpDocumentGetOriginEndpointOutput(v **GetOriginEndp sv.ChannelName = ptr.String(jtv) } - case "ContainerType": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected ContainerType to be of type string, got %T instead", value) - } - sv.ContainerType = types.ContainerType(jtv) - } - case "CreatedAt": if value != nil { switch jtv := value.(type) { @@ -2223,11 +2615,6 @@ func awsRestjson1_deserializeOpDocumentGetOriginEndpointOutput(v **GetOriginEndp } } - case "DashManifests": - if err := awsRestjson1_deserializeDocumentGetDashManifests(&sv.DashManifests, value); err != nil { - return err - } - case "Description": if value != nil { jtv, ok := value.(string) @@ -2237,6 +2624,20 @@ func awsRestjson1_deserializeOpDocumentGetOriginEndpointOutput(v **GetOriginEndp sv.Description = ptr.String(jtv) } + case "Destination": + if err := awsRestjson1_deserializeDocumentDestination(&sv.Destination, value); err != nil { + return err + } + + case "ErrorMessage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.ErrorMessage = ptr.String(jtv) + } + case "ETag": if value != nil { jtv, ok := value.(string) @@ -2246,19 +2647,18 @@ func awsRestjson1_deserializeOpDocumentGetOriginEndpointOutput(v **GetOriginEndp sv.ETag = ptr.String(jtv) } - case "ForceEndpointErrorConfiguration": - if err := awsRestjson1_deserializeDocumentForceEndpointErrorConfiguration(&sv.ForceEndpointErrorConfiguration, value); err != nil { - return err - } - - case "HlsManifests": - if err := 
awsRestjson1_deserializeDocumentGetHlsManifests(&sv.HlsManifests, value); err != nil { + case "HarvestedManifests": + if err := awsRestjson1_deserializeDocumentHarvestedManifests(&sv.HarvestedManifests, value); err != nil { return err } - case "LowLatencyHlsManifests": - if err := awsRestjson1_deserializeDocumentGetLowLatencyHlsManifests(&sv.LowLatencyHlsManifests, value); err != nil { - return err + case "HarvestJobName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) + } + sv.HarvestJobName = ptr.String(jtv) } case "ModifiedAt": @@ -2286,22 +2686,18 @@ func awsRestjson1_deserializeOpDocumentGetOriginEndpointOutput(v **GetOriginEndp sv.OriginEndpointName = ptr.String(jtv) } - case "Segment": - if err := awsRestjson1_deserializeDocumentSegment(&sv.Segment, value); err != nil { + case "ScheduleConfiguration": + if err := awsRestjson1_deserializeDocumentHarvesterScheduleConfiguration(&sv.ScheduleConfiguration, value); err != nil { return err } - case "StartoverWindowSeconds": + case "Status": if value != nil { - jtv, ok := value.(json.Number) + jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err + return fmt.Errorf("expected HarvestJobStatus to be of type string, got %T instead", value) } - sv.StartoverWindowSeconds = ptr.Int32(int32(i64)) + sv.Status = types.HarvestJobStatus(jtv) } case "Tags": @@ -2318,14 +2714,14 @@ func awsRestjson1_deserializeOpDocumentGetOriginEndpointOutput(v **GetOriginEndp return nil } -type awsRestjson1_deserializeOpGetOriginEndpointPolicy struct { +type awsRestjson1_deserializeOpGetOriginEndpoint struct { } -func (*awsRestjson1_deserializeOpGetOriginEndpointPolicy) ID() string { +func (*awsRestjson1_deserializeOpGetOriginEndpoint) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetOriginEndpointPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetOriginEndpoint) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -2343,9 +2739,9 @@ func (m *awsRestjson1_deserializeOpGetOriginEndpointPolicy) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetOriginEndpointPolicy(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetOriginEndpoint(response, &metadata) } - output := &GetOriginEndpointPolicyOutput{} + output := &GetOriginEndpointOutput{} out.Result = output var buff [1024]byte @@ -2366,7 +2762,7 @@ func (m *awsRestjson1_deserializeOpGetOriginEndpointPolicy) HandleDeserialize(ct return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentGetOriginEndpointPolicyOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentGetOriginEndpointOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2380,7 +2776,7 @@ func (m *awsRestjson1_deserializeOpGetOriginEndpointPolicy) HandleDeserialize(ct return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetOriginEndpointPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsRestjson1_deserializeOpErrorGetOriginEndpoint(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -2446,7 +2842,7 @@ func awsRestjson1_deserializeOpErrorGetOriginEndpointPolicy(response *smithyhttp } } -func awsRestjson1_deserializeOpDocumentGetOriginEndpointPolicyOutput(v **GetOriginEndpointPolicyOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetOriginEndpointOutput(v **GetOriginEndpointOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -2459,15 +2855,24 @@ func awsRestjson1_deserializeOpDocumentGetOriginEndpointPolicyOutput(v **GetOrig return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetOriginEndpointPolicyOutput + var sv *GetOriginEndpointOutput if *v == nil { - sv = &GetOriginEndpointPolicyOutput{} + sv = &GetOriginEndpointOutput{} } else { sv = *v } for key, value := range shape { switch key { + case "Arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + case "ChannelGroupName": if value != nil { jtv, ok := value.(string) @@ -2486,6 +2891,85 @@ func awsRestjson1_deserializeOpDocumentGetOriginEndpointPolicyOutput(v **GetOrig sv.ChannelName = ptr.String(jtv) } + case "ContainerType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ContainerType to be of type string, got %T instead", value) + } + sv.ContainerType = types.ContainerType(jtv) + } + + case "CreatedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "DashManifests": + if err := awsRestjson1_deserializeDocumentGetDashManifests(&sv.DashManifests, value); err != nil { + return err + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceDescription to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "ETag": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EntityTag to be of type string, got %T instead", value) + } + sv.ETag = ptr.String(jtv) + } + + case "ForceEndpointErrorConfiguration": + if err := awsRestjson1_deserializeDocumentForceEndpointErrorConfiguration(&sv.ForceEndpointErrorConfiguration, value); err != nil { + return err + } + + case "HlsManifests": + if err := awsRestjson1_deserializeDocumentGetHlsManifests(&sv.HlsManifests, value); err != nil { + return err + } + + case "LowLatencyHlsManifests": + if err := awsRestjson1_deserializeDocumentGetLowLatencyHlsManifests(&sv.LowLatencyHlsManifests, value); err != nil { + return err + } + + case "ModifiedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ModifiedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + case "OriginEndpointName": if value != nil { jtv, ok := value.(string) @@ 
-2495,13 +2979,27 @@ func awsRestjson1_deserializeOpDocumentGetOriginEndpointPolicyOutput(v **GetOrig sv.OriginEndpointName = ptr.String(jtv) } - case "Policy": + case "Segment": + if err := awsRestjson1_deserializeDocumentSegment(&sv.Segment, value); err != nil { + return err + } + + case "StartoverWindowSeconds": if value != nil { - jtv, ok := value.(string) + jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected PolicyText to be of type string, got %T instead", value) + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) } - sv.Policy = ptr.String(jtv) + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StartoverWindowSeconds = ptr.Int32(int32(i64)) + } + + case "Tags": + if err := awsRestjson1_deserializeDocumentTagMap(&sv.Tags, value); err != nil { + return err } default: @@ -2513,14 +3011,14 @@ func awsRestjson1_deserializeOpDocumentGetOriginEndpointPolicyOutput(v **GetOrig return nil } -type awsRestjson1_deserializeOpListChannelGroups struct { +type awsRestjson1_deserializeOpGetOriginEndpointPolicy struct { } -func (*awsRestjson1_deserializeOpListChannelGroups) ID() string { +func (*awsRestjson1_deserializeOpGetOriginEndpointPolicy) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListChannelGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetOriginEndpointPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -2538,9 +3036,9 @@ func (m *awsRestjson1_deserializeOpListChannelGroups) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListChannelGroups(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetOriginEndpointPolicy(response, &metadata) } - output := &ListChannelGroupsOutput{} + output := &GetOriginEndpointPolicyOutput{} out.Result = output var buff [1024]byte @@ -2561,7 +3059,7 @@ func (m *awsRestjson1_deserializeOpListChannelGroups) HandleDeserialize(ctx cont return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListChannelGroupsOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentGetOriginEndpointPolicyOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2575,7 +3073,7 @@ func (m *awsRestjson1_deserializeOpListChannelGroups) HandleDeserialize(ctx cont return out, metadata, err } -func awsRestjson1_deserializeOpErrorListChannelGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetOriginEndpointPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -2622,6 +3120,9 @@ func awsRestjson1_deserializeOpErrorListChannelGroups(response *smithyhttp.Respo case strings.EqualFold("InternalServerException", errorCode): return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, 
errorBody) + case strings.EqualFold("ThrottlingException", errorCode): return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) @@ -2638,7 +3139,7 @@ func awsRestjson1_deserializeOpErrorListChannelGroups(response *smithyhttp.Respo } } -func awsRestjson1_deserializeOpDocumentListChannelGroupsOutput(v **ListChannelGroupsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetOriginEndpointPolicyOutput(v **GetOriginEndpointPolicyOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -2651,27 +3152,49 @@ func awsRestjson1_deserializeOpDocumentListChannelGroupsOutput(v **ListChannelGr return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListChannelGroupsOutput + var sv *GetOriginEndpointPolicyOutput if *v == nil { - sv = &ListChannelGroupsOutput{} + sv = &GetOriginEndpointPolicyOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "Items": - if err := awsRestjson1_deserializeDocumentChannelGroupsList(&sv.Items, value); err != nil { - return err + case "ChannelGroupName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) + } + sv.ChannelGroupName = ptr.String(jtv) } - case "NextToken": + case "ChannelName": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) + return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) } - sv.NextToken = ptr.String(jtv) + sv.ChannelName = ptr.String(jtv) + } + + case "OriginEndpointName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) + } + sv.OriginEndpointName = ptr.String(jtv) + } + + case "Policy": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PolicyText to be of type string, got %T instead", value) + } + sv.Policy = ptr.String(jtv) } default: @@ -2683,14 +3206,14 @@ func awsRestjson1_deserializeOpDocumentListChannelGroupsOutput(v **ListChannelGr return nil } -type awsRestjson1_deserializeOpListChannels struct { +type awsRestjson1_deserializeOpListChannelGroups struct { } -func (*awsRestjson1_deserializeOpListChannels) ID() string { +func (*awsRestjson1_deserializeOpListChannelGroups) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListChannels) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListChannelGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -2708,9 +3231,9 @@ func (m *awsRestjson1_deserializeOpListChannels) HandleDeserialize(ctx context.C } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListChannels(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListChannelGroups(response, &metadata) } - output := &ListChannelsOutput{} + output := &ListChannelGroupsOutput{} out.Result = output var buff [1024]byte @@ -2731,7 +3254,7 @@ func (m *awsRestjson1_deserializeOpListChannels) HandleDeserialize(ctx context.C return out, metadata, err } - err = 
awsRestjson1_deserializeOpDocumentListChannelsOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentListChannelGroupsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2745,7 +3268,7 @@ func (m *awsRestjson1_deserializeOpListChannels) HandleDeserialize(ctx context.C return out, metadata, err } -func awsRestjson1_deserializeOpErrorListChannels(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListChannelGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -2792,9 +3315,6 @@ func awsRestjson1_deserializeOpErrorListChannels(response *smithyhttp.Response, case strings.EqualFold("InternalServerException", errorCode): return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - case strings.EqualFold("ThrottlingException", errorCode): return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) @@ -2811,7 +3331,7 @@ func awsRestjson1_deserializeOpErrorListChannels(response *smithyhttp.Response, } } -func awsRestjson1_deserializeOpDocumentListChannelsOutput(v **ListChannelsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListChannelGroupsOutput(v **ListChannelGroupsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -2824,9 +3344,9 @@ func awsRestjson1_deserializeOpDocumentListChannelsOutput(v **ListChannelsOutput return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListChannelsOutput + var sv *ListChannelGroupsOutput if *v == nil { - sv = &ListChannelsOutput{} + sv = &ListChannelGroupsOutput{} } else { sv = *v } @@ -2834,7 +3354,7 @@ func awsRestjson1_deserializeOpDocumentListChannelsOutput(v **ListChannelsOutput for key, value := range shape { switch key { case "Items": - if err := awsRestjson1_deserializeDocumentChannelList(&sv.Items, value); err != nil { + if err := awsRestjson1_deserializeDocumentChannelGroupsList(&sv.Items, value); err != nil { return err } @@ -2856,14 +3376,14 @@ func awsRestjson1_deserializeOpDocumentListChannelsOutput(v **ListChannelsOutput return nil } -type awsRestjson1_deserializeOpListOriginEndpoints struct { +type awsRestjson1_deserializeOpListChannels struct { } -func (*awsRestjson1_deserializeOpListOriginEndpoints) ID() string { +func (*awsRestjson1_deserializeOpListChannels) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListOriginEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListChannels) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -2881,9 +3401,9 @@ func (m *awsRestjson1_deserializeOpListOriginEndpoints) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListOriginEndpoints(response, &metadata) + return out, metadata, 
awsRestjson1_deserializeOpErrorListChannels(response, &metadata) } - output := &ListOriginEndpointsOutput{} + output := &ListChannelsOutput{} out.Result = output var buff [1024]byte @@ -2904,7 +3424,7 @@ func (m *awsRestjson1_deserializeOpListOriginEndpoints) HandleDeserialize(ctx co return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListOriginEndpointsOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentListChannelsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -2918,7 +3438,7 @@ func (m *awsRestjson1_deserializeOpListOriginEndpoints) HandleDeserialize(ctx co return out, metadata, err } -func awsRestjson1_deserializeOpErrorListOriginEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListChannels(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -2984,7 +3504,7 @@ func awsRestjson1_deserializeOpErrorListOriginEndpoints(response *smithyhttp.Res } } -func awsRestjson1_deserializeOpDocumentListOriginEndpointsOutput(v **ListOriginEndpointsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListChannelsOutput(v **ListChannelsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -2997,9 +3517,9 @@ func awsRestjson1_deserializeOpDocumentListOriginEndpointsOutput(v **ListOriginE return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListOriginEndpointsOutput + var sv *ListChannelsOutput if *v == nil { - sv = &ListOriginEndpointsOutput{} + sv = &ListChannelsOutput{} } else { sv = *v } @@ -3007,7 +3527,7 @@ func awsRestjson1_deserializeOpDocumentListOriginEndpointsOutput(v **ListOriginE for key, value := range shape { switch key { case "Items": - if err := awsRestjson1_deserializeDocumentOriginEndpointsList(&sv.Items, value); err != nil { + if err := awsRestjson1_deserializeDocumentChannelList(&sv.Items, value); err != nil { return err } @@ -3029,14 +3549,14 @@ func awsRestjson1_deserializeOpDocumentListOriginEndpointsOutput(v **ListOriginE return nil } -type awsRestjson1_deserializeOpListTagsForResource struct { +type awsRestjson1_deserializeOpListHarvestJobs struct { } -func (*awsRestjson1_deserializeOpListTagsForResource) ID() string { +func (*awsRestjson1_deserializeOpListHarvestJobs) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListHarvestJobs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3054,9 +3574,9 @@ func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListTagsForResource(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListHarvestJobs(response, &metadata) } - output := &ListTagsForResourceOutput{} + output := &ListHarvestJobsOutput{} out.Result = output var buff [1024]byte @@ -3077,7 
+3597,7 @@ func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx co return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentListHarvestJobsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3091,7 +3611,7 @@ func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx co return out, metadata, err } -func awsRestjson1_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListHarvestJobs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3132,6 +3652,18 @@ func awsRestjson1_deserializeOpErrorListTagsForResource(response *smithyhttp.Res } switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + case strings.EqualFold("ValidationException", errorCode): return awsRestjson1_deserializeErrorValidationException(response, errorBody) @@ -3145,7 +3677,7 @@ func awsRestjson1_deserializeOpErrorListTagsForResource(response *smithyhttp.Res } } -func awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListHarvestJobsOutput(v **ListHarvestJobsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -3158,20 +3690,29 @@ func awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsFor return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListTagsForResourceOutput + var sv *ListHarvestJobsOutput if *v == nil { - sv = &ListTagsForResourceOutput{} + sv = &ListHarvestJobsOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "tags": - if err := awsRestjson1_deserializeDocumentTagMap(&sv.Tags, value); err != nil { + case "Items": + if err := awsRestjson1_deserializeDocumentHarvestJobsList(&sv.Items, value); err != nil { return err } + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + default: _, _ = key, value @@ -3181,14 +3722,14 @@ func awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsFor return nil } -type awsRestjson1_deserializeOpPutChannelPolicy struct { +type awsRestjson1_deserializeOpListOriginEndpoints struct { } -func (*awsRestjson1_deserializeOpPutChannelPolicy) ID() string { +func (*awsRestjson1_deserializeOpListOriginEndpoints) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpPutChannelPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListOriginEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3206,16 +3747,44 @@ func (m *awsRestjson1_deserializeOpPutChannelPolicy) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorPutChannelPolicy(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListOriginEndpoints(response, &metadata) } - output := &PutChannelPolicyOutput{} + output := &ListOriginEndpointsOutput{} out.Result = output + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListOriginEndpointsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + span.End() return out, metadata, err } -func awsRestjson1_deserializeOpErrorPutChannelPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListOriginEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3259,9 +3828,6 @@ func awsRestjson1_deserializeOpErrorPutChannelPolicy(response *smithyhttp.Respon case strings.EqualFold("AccessDeniedException", errorCode): return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) - case strings.EqualFold("ConflictException", errorCode): - return awsRestjson1_deserializeErrorConflictException(response, errorBody) - case strings.EqualFold("InternalServerException", errorCode): return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) @@ -3284,14 +3850,59 @@ func awsRestjson1_deserializeOpErrorPutChannelPolicy(response *smithyhttp.Respon } } -type awsRestjson1_deserializeOpPutOriginEndpointPolicy struct { +func awsRestjson1_deserializeOpDocumentListOriginEndpointsOutput(v **ListOriginEndpointsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListOriginEndpointsOutput + if *v == nil { + sv = &ListOriginEndpointsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Items": + if err := awsRestjson1_deserializeDocumentOriginEndpointsList(&sv.Items, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return 
fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil } -func (*awsRestjson1_deserializeOpPutOriginEndpointPolicy) ID() string { +type awsRestjson1_deserializeOpListTagsForResource struct { +} + +func (*awsRestjson1_deserializeOpListTagsForResource) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpPutOriginEndpointPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3309,16 +3920,44 @@ func (m *awsRestjson1_deserializeOpPutOriginEndpointPolicy) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorPutOriginEndpointPolicy(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListTagsForResource(response, &metadata) } - output := &PutOriginEndpointPolicyOutput{} + output := &ListTagsForResourceOutput{} out.Result = output + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + span.End() return out, metadata, err } -func awsRestjson1_deserializeOpErrorPutOriginEndpointPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3359,21 +3998,6 @@ func awsRestjson1_deserializeOpErrorPutOriginEndpointPolicy(response *smithyhttp } switch { - case strings.EqualFold("AccessDeniedException", errorCode): - return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) - - case strings.EqualFold("ConflictException", errorCode): - return awsRestjson1_deserializeErrorConflictException(response, errorBody) - - case strings.EqualFold("InternalServerException", errorCode): - return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) - - case strings.EqualFold("ResourceNotFoundException", errorCode): - return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) - - case strings.EqualFold("ThrottlingException", errorCode): - return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) - case 
strings.EqualFold("ValidationException", errorCode): return awsRestjson1_deserializeErrorValidationException(response, errorBody) @@ -3387,14 +4011,50 @@ func awsRestjson1_deserializeOpErrorPutOriginEndpointPolicy(response *smithyhttp } } -type awsRestjson1_deserializeOpTagResource struct { +func awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTagsForResourceOutput + if *v == nil { + sv = &ListTagsForResourceOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "tags": + if err := awsRestjson1_deserializeDocumentTagMap(&sv.Tags, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil } -func (*awsRestjson1_deserializeOpTagResource) ID() string { +type awsRestjson1_deserializeOpPutChannelPolicy struct { +} + +func (*awsRestjson1_deserializeOpPutChannelPolicy) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpPutChannelPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3412,22 +4072,16 @@ func (m *awsRestjson1_deserializeOpTagResource) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorTagResource(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorPutChannelPolicy(response, &metadata) } - output := &TagResourceOutput{} + output := &PutChannelPolicyOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - span.End() return out, metadata, err } -func awsRestjson1_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorPutChannelPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -3468,15 +4122,227 @@ func awsRestjson1_deserializeOpErrorTagResource(response *smithyhttp.Response, m } switch { - case strings.EqualFold("ValidationException", errorCode): - return awsRestjson1_deserializeErrorValidationException(response, errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return 
awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestjson1_deserializeOpPutOriginEndpointPolicy struct { +} + +func (*awsRestjson1_deserializeOpPutOriginEndpointPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpPutOriginEndpointPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorPutOriginEndpointPolicy(response, &metadata) + } + output := &PutOriginEndpointPolicyOutput{} + out.Result = output + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorPutOriginEndpointPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case 
strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestjson1_deserializeOpTagResource struct { +} + +func (*awsRestjson1_deserializeOpTagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorTagResource(response, &metadata) + } + output := &TagResourceOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError } } @@ -5072,6 +5938,42 @@ func awsRestjson1_deserializeDocumentDashUtcTiming(v **types.DashUtcTiming, valu return nil } +func 
awsRestjson1_deserializeDocumentDestination(v **types.Destination, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Destination + if *v == nil { + sv = &types.Destination{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "S3Destination": + if err := awsRestjson1_deserializeDocumentS3DestinationConfig(&sv.S3Destination, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentDrmSystems(v *[]types.DrmSystem, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -5714,7 +6616,362 @@ func awsRestjson1_deserializeDocumentGetHlsManifestConfiguration(v **types.GetHl return nil } -func awsRestjson1_deserializeDocumentGetHlsManifests(v *[]types.GetHlsManifestConfiguration, value interface{}) error { +func awsRestjson1_deserializeDocumentGetHlsManifests(v *[]types.GetHlsManifestConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.GetHlsManifestConfiguration + if *v == nil { + cv = []types.GetHlsManifestConfiguration{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.GetHlsManifestConfiguration + destAddr := &col + if err := awsRestjson1_deserializeDocumentGetHlsManifestConfiguration(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentGetLowLatencyHlsManifestConfiguration(v **types.GetLowLatencyHlsManifestConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.GetLowLatencyHlsManifestConfiguration + if *v == nil { + sv = &types.GetLowLatencyHlsManifestConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ChildManifestName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) + } + sv.ChildManifestName = ptr.String(jtv) + } + + case "FilterConfiguration": + if err := awsRestjson1_deserializeDocumentFilterConfiguration(&sv.FilterConfiguration, value); err != nil { + return err + } + + case "ManifestName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) + } + sv.ManifestName = ptr.String(jtv) + } + + case "ManifestWindowSeconds": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ManifestWindowSeconds = ptr.Int32(int32(i64)) + } + + case "ProgramDateTimeIntervalSeconds": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + 
if err != nil { + return err + } + sv.ProgramDateTimeIntervalSeconds = ptr.Int32(int32(i64)) + } + + case "ScteHls": + if err := awsRestjson1_deserializeDocumentScteHls(&sv.ScteHls, value); err != nil { + return err + } + + case "StartTag": + if err := awsRestjson1_deserializeDocumentStartTag(&sv.StartTag, value); err != nil { + return err + } + + case "Url": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected String to be of type string, got %T instead", value) + } + sv.Url = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentGetLowLatencyHlsManifests(v *[]types.GetLowLatencyHlsManifestConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.GetLowLatencyHlsManifestConfiguration + if *v == nil { + cv = []types.GetLowLatencyHlsManifestConfiguration{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.GetLowLatencyHlsManifestConfiguration + destAddr := &col + if err := awsRestjson1_deserializeDocumentGetLowLatencyHlsManifestConfiguration(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentHarvestedDashManifest(v **types.HarvestedDashManifest, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.HarvestedDashManifest + if *v == nil { + sv = &types.HarvestedDashManifest{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ManifestName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) + } + sv.ManifestName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentHarvestedDashManifestsList(v *[]types.HarvestedDashManifest, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.HarvestedDashManifest + if *v == nil { + cv = []types.HarvestedDashManifest{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.HarvestedDashManifest + destAddr := &col + if err := awsRestjson1_deserializeDocumentHarvestedDashManifest(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentHarvestedHlsManifest(v **types.HarvestedHlsManifest, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.HarvestedHlsManifest + if *v == nil { + sv = &types.HarvestedHlsManifest{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ManifestName": + if value != nil { + jtv, ok := 
value.(string) + if !ok { + return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) + } + sv.ManifestName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentHarvestedHlsManifestsList(v *[]types.HarvestedHlsManifest, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.HarvestedHlsManifest + if *v == nil { + cv = []types.HarvestedHlsManifest{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.HarvestedHlsManifest + destAddr := &col + if err := awsRestjson1_deserializeDocumentHarvestedHlsManifest(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentHarvestedLowLatencyHlsManifest(v **types.HarvestedLowLatencyHlsManifest, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.HarvestedLowLatencyHlsManifest + if *v == nil { + sv = &types.HarvestedLowLatencyHlsManifest{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ManifestName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) + } + sv.ManifestName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentHarvestedLowLatencyHlsManifestsList(v *[]types.HarvestedLowLatencyHlsManifest, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -5727,17 +6984,17 @@ func awsRestjson1_deserializeDocumentGetHlsManifests(v *[]types.GetHlsManifestCo return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.GetHlsManifestConfiguration + var cv []types.HarvestedLowLatencyHlsManifest if *v == nil { - cv = []types.GetHlsManifestConfiguration{} + cv = []types.HarvestedLowLatencyHlsManifest{} } else { cv = *v } for _, value := range shape { - var col types.GetHlsManifestConfiguration + var col types.HarvestedLowLatencyHlsManifest destAddr := &col - if err := awsRestjson1_deserializeDocumentGetHlsManifestConfiguration(&destAddr, value); err != nil { + if err := awsRestjson1_deserializeDocumentHarvestedLowLatencyHlsManifest(&destAddr, value); err != nil { return err } col = *destAddr @@ -5748,7 +7005,7 @@ func awsRestjson1_deserializeDocumentGetHlsManifests(v *[]types.GetHlsManifestCo return nil } -func awsRestjson1_deserializeDocumentGetLowLatencyHlsManifestConfiguration(v **types.GetLowLatencyHlsManifestConfiguration, value interface{}) error { +func awsRestjson1_deserializeDocumentHarvestedManifests(v **types.HarvestedManifests, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -5761,81 +7018,250 @@ func awsRestjson1_deserializeDocumentGetLowLatencyHlsManifestConfiguration(v **t return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.GetLowLatencyHlsManifestConfiguration + var sv *types.HarvestedManifests if *v == nil { - sv = &types.GetLowLatencyHlsManifestConfiguration{} + sv = 
&types.HarvestedManifests{} } else { sv = *v } for key, value := range shape { switch key { - case "ChildManifestName": + case "DashManifests": + if err := awsRestjson1_deserializeDocumentHarvestedDashManifestsList(&sv.DashManifests, value); err != nil { + return err + } + + case "HlsManifests": + if err := awsRestjson1_deserializeDocumentHarvestedHlsManifestsList(&sv.HlsManifests, value); err != nil { + return err + } + + case "LowLatencyHlsManifests": + if err := awsRestjson1_deserializeDocumentHarvestedLowLatencyHlsManifestsList(&sv.LowLatencyHlsManifests, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentHarvesterScheduleConfiguration(v **types.HarvesterScheduleConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.HarvesterScheduleConfiguration + if *v == nil { + sv = &types.HarvesterScheduleConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EndTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.EndTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "StartTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.StartTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentHarvestJob(v **types.HarvestJob, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.HarvestJob + if *v == nil { + sv = &types.HarvestJob{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Arn": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) + return fmt.Errorf("expected String to be of type string, got %T instead", value) } - sv.ChildManifestName = ptr.String(jtv) + sv.Arn = ptr.String(jtv) } - case "FilterConfiguration": - if err := awsRestjson1_deserializeDocumentFilterConfiguration(&sv.FilterConfiguration, value); err != nil { - return err + case "ChannelGroupName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) + } + sv.ChannelGroupName = ptr.String(jtv) } - case "ManifestName": + case "ChannelName": if value != nil { jtv, ok := value.(string) if !ok { return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) } - sv.ManifestName = ptr.String(jtv) + sv.ChannelName = ptr.String(jtv) } - case "ManifestWindowSeconds": + case "CreatedAt": if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return fmt.Errorf("expected Integer to be json.Number, got %T instead", 
value) + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + } - i64, err := jtv.Int64() - if err != nil { - return err + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceDescription to be of type string, got %T instead", value) } - sv.ManifestWindowSeconds = ptr.Int32(int32(i64)) + sv.Description = ptr.String(jtv) } - case "ProgramDateTimeIntervalSeconds": + case "Destination": + if err := awsRestjson1_deserializeDocumentDestination(&sv.Destination, value); err != nil { + return err + } + + case "ErrorMessage": if value != nil { - jtv, ok := value.(json.Number) + jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + return fmt.Errorf("expected String to be of type string, got %T instead", value) } - i64, err := jtv.Int64() - if err != nil { - return err + sv.ErrorMessage = ptr.String(jtv) + } + + case "ETag": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EntityTag to be of type string, got %T instead", value) } - sv.ProgramDateTimeIntervalSeconds = ptr.Int32(int32(i64)) + sv.ETag = ptr.String(jtv) } - case "ScteHls": - if err := awsRestjson1_deserializeDocumentScteHls(&sv.ScteHls, value); err != nil { + case "HarvestedManifests": + if err := awsRestjson1_deserializeDocumentHarvestedManifests(&sv.HarvestedManifests, value); err != nil { return err } - case "StartTag": - if err := awsRestjson1_deserializeDocumentStartTag(&sv.StartTag, value); err != nil { + case "HarvestJobName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) + } + sv.HarvestJobName = ptr.String(jtv) + } + + case "ModifiedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ModifiedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "OriginEndpointName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceName to be of type string, got %T instead", value) + } + sv.OriginEndpointName = ptr.String(jtv) + } + + case "ScheduleConfiguration": + if err := awsRestjson1_deserializeDocumentHarvesterScheduleConfiguration(&sv.ScheduleConfiguration, value); err != nil { return err } - case "Url": + case "Status": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) + return fmt.Errorf("expected HarvestJobStatus to be of type string, got %T instead", value) } - sv.Url = ptr.String(jtv) + sv.Status = types.HarvestJobStatus(jtv) } default: @@ -5847,7 +7273,7 @@ func awsRestjson1_deserializeDocumentGetLowLatencyHlsManifestConfiguration(v **t return nil } -func awsRestjson1_deserializeDocumentGetLowLatencyHlsManifests(v *[]types.GetLowLatencyHlsManifestConfiguration, value interface{}) error { +func awsRestjson1_deserializeDocumentHarvestJobsList(v *[]types.HarvestJob, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -5860,17 +7286,17 @@ func 
awsRestjson1_deserializeDocumentGetLowLatencyHlsManifests(v *[]types.GetLow return fmt.Errorf("unexpected JSON type %v", value) } - var cv []types.GetLowLatencyHlsManifestConfiguration + var cv []types.HarvestJob if *v == nil { - cv = []types.GetLowLatencyHlsManifestConfiguration{} + cv = []types.HarvestJob{} } else { cv = *v } for _, value := range shape { - var col types.GetLowLatencyHlsManifestConfiguration + var col types.HarvestJob destAddr := &col - if err := awsRestjson1_deserializeDocumentGetLowLatencyHlsManifestConfiguration(&destAddr, value); err != nil { + if err := awsRestjson1_deserializeDocumentHarvestJob(&destAddr, value); err != nil { return err } col = *destAddr @@ -6491,6 +7917,55 @@ func awsRestjson1_deserializeDocumentResourceNotFoundException(v **types.Resourc return nil } +func awsRestjson1_deserializeDocumentS3DestinationConfig(v **types.S3DestinationConfig, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.S3DestinationConfig + if *v == nil { + sv = &types.S3DestinationConfig{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "BucketName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3BucketName to be of type string, got %T instead", value) + } + sv.BucketName = ptr.String(jtv) + } + + case "DestinationPath": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3DestinationPath to be of type string, got %T instead", value) + } + sv.DestinationPath = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentScte(v **types.Scte, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/mediapackagev2/generated.json b/service/mediapackagev2/generated.json index 2024bbe68f9..ac2472eb7b6 100644 --- a/service/mediapackagev2/generated.json +++ b/service/mediapackagev2/generated.json @@ -3,13 +3,16 @@ "github.com/aws/aws-sdk-go-v2": "v1.4.0", "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/smithy-go": "v1.4.0" + "github.com/aws/smithy-go": "v1.4.0", + "github.com/jmespath/go-jmespath": "v0.4.0" }, "files": [ "api_client.go", "api_client_test.go", + "api_op_CancelHarvestJob.go", "api_op_CreateChannel.go", "api_op_CreateChannelGroup.go", + "api_op_CreateHarvestJob.go", "api_op_CreateOriginEndpoint.go", "api_op_DeleteChannel.go", "api_op_DeleteChannelGroup.go", @@ -19,10 +22,12 @@ "api_op_GetChannel.go", "api_op_GetChannelGroup.go", "api_op_GetChannelPolicy.go", + "api_op_GetHarvestJob.go", "api_op_GetOriginEndpoint.go", "api_op_GetOriginEndpointPolicy.go", "api_op_ListChannelGroups.go", "api_op_ListChannels.go", + "api_op_ListHarvestJobs.go", "api_op_ListOriginEndpoints.go", "api_op_ListTagsForResource.go", "api_op_PutChannelPolicy.go", diff --git a/service/mediapackagev2/go.mod b/service/mediapackagev2/go.mod index b98c429dfb4..91128a9b1b2 100644 --- a/service/mediapackagev2/go.mod +++ b/service/mediapackagev2/go.mod @@ -7,6 +7,7 @@ require ( github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 
github.com/aws/smithy-go v1.22.0 + github.com/jmespath/go-jmespath v0.4.0 ) replace github.com/aws/aws-sdk-go-v2 => ../../ diff --git a/service/mediapackagev2/go.sum b/service/mediapackagev2/go.sum index 70a20636e37..609e6296b2c 100644 --- a/service/mediapackagev2/go.sum +++ b/service/mediapackagev2/go.sum @@ -1,2 +1,14 @@ github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/service/mediapackagev2/serializers.go b/service/mediapackagev2/serializers.go index 483c76ada7e..86adbf441e3 100644 --- a/service/mediapackagev2/serializers.go +++ b/service/mediapackagev2/serializers.go @@ -17,6 +17,109 @@ import ( "math" ) +type awsRestjson1_serializeOpCancelHarvestJob struct { +} + +func (*awsRestjson1_serializeOpCancelHarvestJob) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCancelHarvestJob) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CancelHarvestJobInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint/{OriginEndpointName}/harvestJob/{HarvestJobName}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return 
out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsCancelHarvestJobInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCancelHarvestJobInput(v *CancelHarvestJobInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ChannelGroupName == nil || len(*v.ChannelGroupName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member ChannelGroupName must not be empty")} + } + if v.ChannelGroupName != nil { + if err := encoder.SetURI("ChannelGroupName").String(*v.ChannelGroupName); err != nil { + return err + } + } + + if v.ChannelName == nil || len(*v.ChannelName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member ChannelName must not be empty")} + } + if v.ChannelName != nil { + if err := encoder.SetURI("ChannelName").String(*v.ChannelName); err != nil { + return err + } + } + + if v.ETag != nil { + locationName := "X-Amzn-Update-If-Match" + encoder.SetHeader(locationName).String(*v.ETag) + } + + if v.HarvestJobName == nil || len(*v.HarvestJobName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member HarvestJobName must not be empty")} + } + if v.HarvestJobName != nil { + if err := encoder.SetURI("HarvestJobName").String(*v.HarvestJobName); err != nil { + return err + } + } + + if v.OriginEndpointName == nil || len(*v.OriginEndpointName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member OriginEndpointName must not be empty")} + } + if v.OriginEndpointName != nil { + if err := encoder.SetURI("OriginEndpointName").String(*v.OriginEndpointName); err != nil { + return err + } + } + + return nil +} + type awsRestjson1_serializeOpCreateChannel struct { } @@ -235,6 +338,156 @@ func awsRestjson1_serializeOpDocumentCreateChannelGroupInput(v *CreateChannelGro return nil } +type awsRestjson1_serializeOpCreateHarvestJob struct { +} + +func (*awsRestjson1_serializeOpCreateHarvestJob) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateHarvestJob) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateHarvestJobInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint/{OriginEndpointName}/harvestJob") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if 
request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsCreateHarvestJobInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateHarvestJobInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateHarvestJobInput(v *CreateHarvestJobInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ChannelGroupName == nil || len(*v.ChannelGroupName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member ChannelGroupName must not be empty")} + } + if v.ChannelGroupName != nil { + if err := encoder.SetURI("ChannelGroupName").String(*v.ChannelGroupName); err != nil { + return err + } + } + + if v.ChannelName == nil || len(*v.ChannelName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member ChannelName must not be empty")} + } + if v.ChannelName != nil { + if err := encoder.SetURI("ChannelName").String(*v.ChannelName); err != nil { + return err + } + } + + if v.ClientToken != nil { + locationName := "X-Amzn-Client-Token" + encoder.SetHeader(locationName).String(*v.ClientToken) + } + + if v.OriginEndpointName == nil || len(*v.OriginEndpointName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member OriginEndpointName must not be empty")} + } + if v.OriginEndpointName != nil { + if err := encoder.SetURI("OriginEndpointName").String(*v.OriginEndpointName); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateHarvestJobInput(v *CreateHarvestJobInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.Destination != nil { + ok := object.Key("Destination") + if err := awsRestjson1_serializeDocumentDestination(v.Destination, ok); err != nil { + return err + } + } + + if v.HarvestedManifests != nil { + ok := object.Key("HarvestedManifests") + if err := awsRestjson1_serializeDocumentHarvestedManifests(v.HarvestedManifests, ok); err != nil { + return err + } + } + + if v.HarvestJobName != nil { + ok := object.Key("HarvestJobName") + ok.String(*v.HarvestJobName) + } + + if v.ScheduleConfiguration != nil { + ok := object.Key("ScheduleConfiguration") + if err := awsRestjson1_serializeDocumentHarvesterScheduleConfiguration(v.ScheduleConfiguration, ok); err != nil { + return err + } + } + + if 
v.Tags != nil { + ok := object.Key("Tags") + if err := awsRestjson1_serializeDocumentTagMap(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + type awsRestjson1_serializeOpCreateOriginEndpoint struct { } @@ -1040,6 +1293,104 @@ func awsRestjson1_serializeOpHttpBindingsGetChannelPolicyInput(v *GetChannelPoli return nil } +type awsRestjson1_serializeOpGetHarvestJob struct { +} + +func (*awsRestjson1_serializeOpGetHarvestJob) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetHarvestJob) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetHarvestJobInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/channelGroup/{ChannelGroupName}/channel/{ChannelName}/originEndpoint/{OriginEndpointName}/harvestJob/{HarvestJobName}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetHarvestJobInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetHarvestJobInput(v *GetHarvestJobInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ChannelGroupName == nil || len(*v.ChannelGroupName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member ChannelGroupName must not be empty")} + } + if v.ChannelGroupName != nil { + if err := encoder.SetURI("ChannelGroupName").String(*v.ChannelGroupName); err != nil { + return err + } + } + + if v.ChannelName == nil || len(*v.ChannelName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member ChannelName must not be empty")} + } + if v.ChannelName != nil { + if err := encoder.SetURI("ChannelName").String(*v.ChannelName); err != nil { + return err + } + } + + if v.HarvestJobName == nil || len(*v.HarvestJobName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member HarvestJobName must not be empty")} + } + if v.HarvestJobName != nil { + if err := 
encoder.SetURI("HarvestJobName").String(*v.HarvestJobName); err != nil { + return err + } + } + + if v.OriginEndpointName == nil || len(*v.OriginEndpointName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member OriginEndpointName must not be empty")} + } + if v.OriginEndpointName != nil { + if err := encoder.SetURI("OriginEndpointName").String(*v.OriginEndpointName); err != nil { + return err + } + } + + return nil +} + type awsRestjson1_serializeOpGetOriginEndpoint struct { } @@ -1367,6 +1718,97 @@ func awsRestjson1_serializeOpHttpBindingsListChannelsInput(v *ListChannelsInput, return nil } +type awsRestjson1_serializeOpListHarvestJobs struct { +} + +func (*awsRestjson1_serializeOpListHarvestJobs) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListHarvestJobs) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListHarvestJobsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/channelGroup/{ChannelGroupName}/harvestJob") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListHarvestJobsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListHarvestJobsInput(v *ListHarvestJobsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ChannelGroupName == nil || len(*v.ChannelGroupName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member ChannelGroupName must not be empty")} + } + if v.ChannelGroupName != nil { + if err := encoder.SetURI("ChannelGroupName").String(*v.ChannelGroupName); err != nil { + return err + } + } + + if v.ChannelName != nil { + encoder.SetQuery("channelName").String(*v.ChannelName) + } + + if v.MaxResults != nil { + encoder.SetQuery("maxResults").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("nextToken").String(*v.NextToken) + } + + if v.OriginEndpointName != nil { + 
encoder.SetQuery("originEndpointName").String(*v.OriginEndpointName) + } + + if len(v.Status) > 0 { + encoder.SetQuery("includeStatus").String(string(v.Status)) + } + + return nil +} + type awsRestjson1_serializeOpListOriginEndpoints struct { } @@ -2516,6 +2958,20 @@ func awsRestjson1_serializeDocumentDashUtcTiming(v *types.DashUtcTiming, value s return nil } +func awsRestjson1_serializeDocumentDestination(v *types.Destination, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.S3Destination != nil { + ok := object.Key("S3Destination") + if err := awsRestjson1_serializeDocumentS3DestinationConfig(v.S3Destination, ok); err != nil { + return err + } + } + + return nil +} + func awsRestjson1_serializeDocumentDrmSystems(v []types.DrmSystem, value smithyjson.Value) error { array := value.Array() defer array.Close() @@ -2649,6 +3105,143 @@ func awsRestjson1_serializeDocumentForceEndpointErrorConfiguration(v *types.Forc return nil } +func awsRestjson1_serializeDocumentHarvestedDashManifest(v *types.HarvestedDashManifest, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ManifestName != nil { + ok := object.Key("ManifestName") + ok.String(*v.ManifestName) + } + + return nil +} + +func awsRestjson1_serializeDocumentHarvestedDashManifestsList(v []types.HarvestedDashManifest, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentHarvestedDashManifest(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsRestjson1_serializeDocumentHarvestedHlsManifest(v *types.HarvestedHlsManifest, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ManifestName != nil { + ok := object.Key("ManifestName") + ok.String(*v.ManifestName) + } + + return nil +} + +func awsRestjson1_serializeDocumentHarvestedHlsManifestsList(v []types.HarvestedHlsManifest, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentHarvestedHlsManifest(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsRestjson1_serializeDocumentHarvestedLowLatencyHlsManifest(v *types.HarvestedLowLatencyHlsManifest, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ManifestName != nil { + ok := object.Key("ManifestName") + ok.String(*v.ManifestName) + } + + return nil +} + +func awsRestjson1_serializeDocumentHarvestedLowLatencyHlsManifestsList(v []types.HarvestedLowLatencyHlsManifest, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentHarvestedLowLatencyHlsManifest(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsRestjson1_serializeDocumentHarvestedManifests(v *types.HarvestedManifests, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DashManifests != nil { + ok := object.Key("DashManifests") + if err := awsRestjson1_serializeDocumentHarvestedDashManifestsList(v.DashManifests, ok); err != nil { + return err + } + } + + if v.HlsManifests != nil { + ok := object.Key("HlsManifests") + if err := awsRestjson1_serializeDocumentHarvestedHlsManifestsList(v.HlsManifests, ok); err != nil { + return err + } + } + + if v.LowLatencyHlsManifests != nil { + ok := 
object.Key("LowLatencyHlsManifests") + if err := awsRestjson1_serializeDocumentHarvestedLowLatencyHlsManifestsList(v.LowLatencyHlsManifests, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentHarvesterScheduleConfiguration(v *types.HarvesterScheduleConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EndTime != nil { + ok := object.Key("EndTime") + ok.Double(smithytime.FormatEpochSeconds(*v.EndTime)) + } + + if v.StartTime != nil { + ok := object.Key("StartTime") + ok.Double(smithytime.FormatEpochSeconds(*v.StartTime)) + } + + return nil +} + +func awsRestjson1_serializeDocumentS3DestinationConfig(v *types.S3DestinationConfig, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BucketName != nil { + ok := object.Key("BucketName") + ok.String(*v.BucketName) + } + + if v.DestinationPath != nil { + ok := object.Key("DestinationPath") + ok.String(*v.DestinationPath) + } + + return nil +} + func awsRestjson1_serializeDocumentScte(v *types.Scte, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/service/mediapackagev2/snapshot/api_op_CancelHarvestJob.go.snap b/service/mediapackagev2/snapshot/api_op_CancelHarvestJob.go.snap new file mode 100644 index 00000000000..6b2b7250048 --- /dev/null +++ b/service/mediapackagev2/snapshot/api_op_CancelHarvestJob.go.snap @@ -0,0 +1,41 @@ +CancelHarvestJob + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/mediapackagev2/snapshot/api_op_CreateHarvestJob.go.snap b/service/mediapackagev2/snapshot/api_op_CreateHarvestJob.go.snap new file mode 100644 index 00000000000..b59aec79f32 --- /dev/null +++ b/service/mediapackagev2/snapshot/api_op_CreateHarvestJob.go.snap @@ -0,0 +1,42 @@ +CreateHarvestJob + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + OperationIdempotencyTokenAutoFill + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git 
a/service/mediapackagev2/snapshot/api_op_GetHarvestJob.go.snap b/service/mediapackagev2/snapshot/api_op_GetHarvestJob.go.snap new file mode 100644 index 00000000000..46cd1fe30f1 --- /dev/null +++ b/service/mediapackagev2/snapshot/api_op_GetHarvestJob.go.snap @@ -0,0 +1,41 @@ +GetHarvestJob + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/mediapackagev2/snapshot/api_op_ListHarvestJobs.go.snap b/service/mediapackagev2/snapshot/api_op_ListHarvestJobs.go.snap new file mode 100644 index 00000000000..806fdbbaddd --- /dev/null +++ b/service/mediapackagev2/snapshot/api_op_ListHarvestJobs.go.snap @@ -0,0 +1,41 @@ +ListHarvestJobs + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/mediapackagev2/snapshot_test.go b/service/mediapackagev2/snapshot_test.go index fd0ec4d2232..0ff7782ad08 100644 --- a/service/mediapackagev2/snapshot_test.go +++ b/service/mediapackagev2/snapshot_test.go @@ -62,6 +62,18 @@ func testSnapshot(stack *middleware.Stack, operation string) error { } return snapshotOK{} } +func TestCheckSnapshot_CancelHarvestJob(t *testing.T) { + svc := New(Options{}) + _, err := svc.CancelHarvestJob(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "CancelHarvestJob") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_CreateChannel(t *testing.T) { svc := New(Options{}) _, err := svc.CreateChannel(context.Background(), nil, func(o *Options) { @@ -86,6 +98,18 @@ func TestCheckSnapshot_CreateChannelGroup(t *testing.T) { } } +func TestCheckSnapshot_CreateHarvestJob(t *testing.T) { + svc := New(Options{}) + _, err := svc.CreateHarvestJob(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "CreateHarvestJob") + }) + }) + if _, ok := 
err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_CreateOriginEndpoint(t *testing.T) { svc := New(Options{}) _, err := svc.CreateOriginEndpoint(context.Background(), nil, func(o *Options) { @@ -194,6 +218,18 @@ func TestCheckSnapshot_GetChannelPolicy(t *testing.T) { } } +func TestCheckSnapshot_GetHarvestJob(t *testing.T) { + svc := New(Options{}) + _, err := svc.GetHarvestJob(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "GetHarvestJob") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_GetOriginEndpoint(t *testing.T) { svc := New(Options{}) _, err := svc.GetOriginEndpoint(context.Background(), nil, func(o *Options) { @@ -242,6 +278,18 @@ func TestCheckSnapshot_ListChannels(t *testing.T) { } } +func TestCheckSnapshot_ListHarvestJobs(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListHarvestJobs(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListHarvestJobs") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_ListOriginEndpoints(t *testing.T) { svc := New(Options{}) _, err := svc.ListOriginEndpoints(context.Background(), nil, func(o *Options) { @@ -349,6 +397,18 @@ func TestCheckSnapshot_UpdateOriginEndpoint(t *testing.T) { t.Fatal(err) } } +func TestUpdateSnapshot_CancelHarvestJob(t *testing.T) { + svc := New(Options{}) + _, err := svc.CancelHarvestJob(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "CancelHarvestJob") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_CreateChannel(t *testing.T) { svc := New(Options{}) _, err := svc.CreateChannel(context.Background(), nil, func(o *Options) { @@ -373,6 +433,18 @@ func TestUpdateSnapshot_CreateChannelGroup(t *testing.T) { } } +func TestUpdateSnapshot_CreateHarvestJob(t *testing.T) { + svc := New(Options{}) + _, err := svc.CreateHarvestJob(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "CreateHarvestJob") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_CreateOriginEndpoint(t *testing.T) { svc := New(Options{}) _, err := svc.CreateOriginEndpoint(context.Background(), nil, func(o *Options) { @@ -481,6 +553,18 @@ func TestUpdateSnapshot_GetChannelPolicy(t *testing.T) { } } +func TestUpdateSnapshot_GetHarvestJob(t *testing.T) { + svc := New(Options{}) + _, err := svc.GetHarvestJob(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "GetHarvestJob") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_GetOriginEndpoint(t *testing.T) { svc := New(Options{}) _, err := svc.GetOriginEndpoint(context.Background(), nil, func(o *Options) { @@ -529,6 +613,18 @@ func TestUpdateSnapshot_ListChannels(t *testing.T) { } } +func TestUpdateSnapshot_ListHarvestJobs(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListHarvestJobs(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, 
func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListHarvestJobs") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_ListOriginEndpoints(t *testing.T) { svc := New(Options{}) _, err := svc.ListOriginEndpoints(context.Background(), nil, func(o *Options) { diff --git a/service/mediapackagev2/types/enums.go b/service/mediapackagev2/types/enums.go index bff531058b8..a732280f228 100644 --- a/service/mediapackagev2/types/enums.go +++ b/service/mediapackagev2/types/enums.go @@ -231,6 +231,31 @@ func (EndpointErrorCondition) Values() []EndpointErrorCondition { } } +type HarvestJobStatus string + +// Enum values for HarvestJobStatus +const ( + HarvestJobStatusQueued HarvestJobStatus = "QUEUED" + HarvestJobStatusInProgress HarvestJobStatus = "IN_PROGRESS" + HarvestJobStatusCancelled HarvestJobStatus = "CANCELLED" + HarvestJobStatusCompleted HarvestJobStatus = "COMPLETED" + HarvestJobStatusFailed HarvestJobStatus = "FAILED" +) + +// Values returns all known values for HarvestJobStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (HarvestJobStatus) Values() []HarvestJobStatus { + return []HarvestJobStatus{ + "QUEUED", + "IN_PROGRESS", + "CANCELLED", + "COMPLETED", + "FAILED", + } +} + type InputType string // Enum values for InputType @@ -317,6 +342,7 @@ const ( ResourceTypeNotFoundChannelGroup ResourceTypeNotFound = "CHANNEL_GROUP" ResourceTypeNotFoundChannel ResourceTypeNotFound = "CHANNEL" ResourceTypeNotFoundOriginEndpoint ResourceTypeNotFound = "ORIGIN_ENDPOINT" + ResourceTypeNotFoundHarvestJob ResourceTypeNotFound = "HARVEST_JOB" ) // Values returns all known values for ResourceTypeNotFound. 
Note that this can be @@ -328,6 +354,7 @@ func (ResourceTypeNotFound) Values() []ResourceTypeNotFound { "CHANNEL_GROUP", "CHANNEL", "ORIGIN_ENDPOINT", + "HARVEST_JOB", } } @@ -434,6 +461,14 @@ const ( ValidationExceptionTypeDrmSignalingMismatchSegmentEncryptionStatus ValidationExceptionType = "DRM_SIGNALING_MISMATCH_SEGMENT_ENCRYPTION_STATUS" ValidationExceptionTypeOnlyCmafInputTypeAllowForceEndpointErrorConfiguration ValidationExceptionType = "ONLY_CMAF_INPUT_TYPE_ALLOW_FORCE_ENDPOINT_ERROR_CONFIGURATION" ValidationExceptionTypeSourceDisruptionsEnabledIncorrectly ValidationExceptionType = "SOURCE_DISRUPTIONS_ENABLED_INCORRECTLY" + ValidationExceptionTypeHarvestedManifestHasStartEndFilterConfiguration ValidationExceptionType = "HARVESTED_MANIFEST_HAS_START_END_FILTER_CONFIGURATION" + ValidationExceptionTypeHarvestedManifestNotFoundOnEndpoint ValidationExceptionType = "HARVESTED_MANIFEST_NOT_FOUND_ON_ENDPOINT" + ValidationExceptionTypeTooManyInProgressHarvestJobs ValidationExceptionType = "TOO_MANY_IN_PROGRESS_HARVEST_JOBS" + ValidationExceptionTypeHarvestJobIneligibleForCancellation ValidationExceptionType = "HARVEST_JOB_INELIGIBLE_FOR_CANCELLATION" + ValidationExceptionTypeInvalidHarvestJobDuration ValidationExceptionType = "INVALID_HARVEST_JOB_DURATION" + ValidationExceptionTypeHarvestJobS3DestinationMissingOrIncomplete ValidationExceptionType = "HARVEST_JOB_S3_DESTINATION_MISSING_OR_INCOMPLETE" + ValidationExceptionTypeHarvestJobUnableToWriteToS3Destination ValidationExceptionType = "HARVEST_JOB_UNABLE_TO_WRITE_TO_S3_DESTINATION" + ValidationExceptionTypeHarvestJobCustomerEndpointReadAccessDenied ValidationExceptionType = "HARVEST_JOB_CUSTOMER_ENDPOINT_READ_ACCESS_DENIED" ValidationExceptionTypeClipStartTimeWithStartOrEnd ValidationExceptionType = "CLIP_START_TIME_WITH_START_OR_END" ValidationExceptionTypeStartTagTimeOffsetInvalid ValidationExceptionType = "START_TAG_TIME_OFFSET_INVALID" ) @@ -491,6 +526,14 @@ func (ValidationExceptionType) Values() []ValidationExceptionType { "DRM_SIGNALING_MISMATCH_SEGMENT_ENCRYPTION_STATUS", "ONLY_CMAF_INPUT_TYPE_ALLOW_FORCE_ENDPOINT_ERROR_CONFIGURATION", "SOURCE_DISRUPTIONS_ENABLED_INCORRECTLY", + "HARVESTED_MANIFEST_HAS_START_END_FILTER_CONFIGURATION", + "HARVESTED_MANIFEST_NOT_FOUND_ON_ENDPOINT", + "TOO_MANY_IN_PROGRESS_HARVEST_JOBS", + "HARVEST_JOB_INELIGIBLE_FOR_CANCELLATION", + "INVALID_HARVEST_JOB_DURATION", + "HARVEST_JOB_S3_DESTINATION_MISSING_OR_INCOMPLETE", + "HARVEST_JOB_UNABLE_TO_WRITE_TO_S3_DESTINATION", + "HARVEST_JOB_CUSTOMER_ENDPOINT_READ_ACCESS_DENIED", "CLIP_START_TIME_WITH_START_OR_END", "START_TAG_TIME_OFFSET_INVALID", } diff --git a/service/mediapackagev2/types/types.go b/service/mediapackagev2/types/types.go index 569ad0fdc48..ca242bc6a63 100644 --- a/service/mediapackagev2/types/types.go +++ b/service/mediapackagev2/types/types.go @@ -266,6 +266,19 @@ type DashUtcTiming struct { noSmithyDocumentSerde } +// The configuration for the destination where the harvested content will be +// exported. +type Destination struct { + + // The configuration for exporting harvested content to an S3 bucket. This + // includes details such as the bucket name and destination path within the bucket. + // + // This member is required. + S3Destination *S3DestinationConfig + + noSmithyDocumentSerde +} + // The parameters for encrypting content. type Encryption struct { @@ -615,6 +628,143 @@ type GetLowLatencyHlsManifestConfiguration struct { noSmithyDocumentSerde } +// Information about a harvested DASH manifest. 
+type HarvestedDashManifest struct { + + // The name of the harvested DASH manifest. + // + // This member is required. + ManifestName *string + + noSmithyDocumentSerde +} + +// Information about a harvested HLS manifest. +type HarvestedHlsManifest struct { + + // The name of the harvested HLS manifest. + // + // This member is required. + ManifestName *string + + noSmithyDocumentSerde +} + +// Information about a harvested Low-Latency HLS manifest. +type HarvestedLowLatencyHlsManifest struct { + + // The name of the harvested Low-Latency HLS manifest. + // + // This member is required. + ManifestName *string + + noSmithyDocumentSerde +} + +// A collection of harvested manifests of different types. +type HarvestedManifests struct { + + // A list of harvested DASH manifests. + DashManifests []HarvestedDashManifest + + // A list of harvested HLS manifests. + HlsManifests []HarvestedHlsManifest + + // A list of harvested Low-Latency HLS manifests. + LowLatencyHlsManifests []HarvestedLowLatencyHlsManifest + + noSmithyDocumentSerde +} + +// Defines the schedule configuration for a harvest job. +type HarvesterScheduleConfiguration struct { + + // The end time for the harvest job. + // + // This member is required. + EndTime *time.Time + + // The start time for the harvest job. + // + // This member is required. + StartTime *time.Time + + noSmithyDocumentSerde +} + +// Represents a harvest job resource in MediaPackage v2, which is used to export +// content from an origin endpoint to an S3 bucket. +type HarvestJob struct { + + // The Amazon Resource Name (ARN) of the harvest job. + // + // This member is required. + Arn *string + + // The name of the channel group containing the channel associated with this + // harvest job. + // + // This member is required. + ChannelGroupName *string + + // The name of the channel associated with this harvest job. + // + // This member is required. + ChannelName *string + + // The date and time when the harvest job was created. + // + // This member is required. + CreatedAt *time.Time + + // The S3 destination where the harvested content will be placed. + // + // This member is required. + Destination *Destination + + // The name of the harvest job. + // + // This member is required. + HarvestJobName *string + + // A list of manifests that are being or have been harvested. + // + // This member is required. + HarvestedManifests *HarvestedManifests + + // The date and time when the harvest job was last modified. + // + // This member is required. + ModifiedAt *time.Time + + // The name of the origin endpoint associated with this harvest job. + // + // This member is required. + OriginEndpointName *string + + // The configuration for when the harvest job is scheduled to run. + // + // This member is required. + ScheduleConfiguration *HarvesterScheduleConfiguration + + // The current status of the harvest job (e.g., QUEUED, IN_PROGRESS, CANCELLED, + // COMPLETED, FAILED). + // + // This member is required. + Status HarvestJobStatus + + // An optional description of the harvest job. + Description *string + + // The current version of the harvest job. Used for concurrency control. + ETag *string + + // An error message if the harvest job encountered any issues. + ErrorMessage *string + + noSmithyDocumentSerde +} + // The ingest domain URL where the source stream should be sent. 
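The harvest job types above (Destination with its S3 configuration, HarvestedManifests, HarvesterScheduleConfiguration, HarvestJob) come together in the new CreateHarvestJob operation. The following is a minimal usage sketch, not canonical SDK documentation: it sets only the fields that the new input validators in this change mark as required, and every channel group, channel, endpoint, manifest name, bucket, path, and time window is a placeholder.

// Illustrative sketch only: creating a MediaPackage v2 harvest job with the
// types added in this change. All resource names, the manifest name, the S3
// bucket/path, and the harvest window are placeholders.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/mediapackagev2"
	"github.com/aws/aws-sdk-go-v2/service/mediapackagev2/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	svc := mediapackagev2.NewFromConfig(cfg)

	// Harvest the previous hour of content from a live origin endpoint.
	end := time.Now().UTC()
	start := end.Add(-1 * time.Hour)

	out, err := svc.CreateHarvestJob(context.TODO(), &mediapackagev2.CreateHarvestJobInput{
		ChannelGroupName:   aws.String("example-channel-group"),
		ChannelName:        aws.String("example-channel"),
		OriginEndpointName: aws.String("example-endpoint"),
		HarvestedManifests: &types.HarvestedManifests{
			HlsManifests: []types.HarvestedHlsManifest{
				{ManifestName: aws.String("index")},
			},
		},
		ScheduleConfiguration: &types.HarvesterScheduleConfiguration{
			StartTime: aws.Time(start),
			EndTime:   aws.Time(end),
		},
		Destination: &types.Destination{
			S3Destination: &types.S3DestinationConfig{
				BucketName:      aws.String("amzn-s3-demo-bucket"),
				DestinationPath: aws.String("harvested/example-endpoint"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created harvest job: %+v\n", out)
}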
type IngestEndpoint struct { @@ -755,6 +905,24 @@ type OriginEndpointListConfiguration struct { noSmithyDocumentSerde } +// Configuration parameters for where in an S3 bucket to place the harvested +// content. +type S3DestinationConfig struct { + + // The name of an S3 bucket within which harvested content will be exported. + // + // This member is required. + BucketName *string + + // The path within the specified S3 bucket where the harvested content will be + // placed. + // + // This member is required. + DestinationPath *string + + noSmithyDocumentSerde +} + // The SCTE configuration. type Scte struct { diff --git a/service/mediapackagev2/validators.go b/service/mediapackagev2/validators.go index 222eab7a808..d6909c0a464 100644 --- a/service/mediapackagev2/validators.go +++ b/service/mediapackagev2/validators.go @@ -10,6 +10,26 @@ import ( "github.com/aws/smithy-go/middleware" ) +type validateOpCancelHarvestJob struct { +} + +func (*validateOpCancelHarvestJob) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCancelHarvestJob) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CancelHarvestJobInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCancelHarvestJobInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpCreateChannelGroup struct { } @@ -50,6 +70,26 @@ func (m *validateOpCreateChannel) HandleInitialize(ctx context.Context, in middl return next.HandleInitialize(ctx, in) } +type validateOpCreateHarvestJob struct { +} + +func (*validateOpCreateHarvestJob) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateHarvestJob) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateHarvestJobInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateHarvestJobInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpCreateOriginEndpoint struct { } @@ -230,6 +270,26 @@ func (m *validateOpGetChannelPolicy) HandleInitialize(ctx context.Context, in mi return next.HandleInitialize(ctx, in) } +type validateOpGetHarvestJob struct { +} + +func (*validateOpGetHarvestJob) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetHarvestJob) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetHarvestJobInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetHarvestJobInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpGetOriginEndpoint struct { } @@ -290,6 +350,26 @@ func (m *validateOpListChannels) HandleInitialize(ctx context.Context, in middle return next.HandleInitialize(ctx, in) } +type validateOpListHarvestJobs struct { +} + +func (*validateOpListHarvestJobs) ID() string { + return "OperationInputValidation" 
+} + +func (m *validateOpListHarvestJobs) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListHarvestJobsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListHarvestJobsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpListOriginEndpoints struct { } @@ -470,6 +550,10 @@ func (m *validateOpUpdateOriginEndpoint) HandleInitialize(ctx context.Context, i return next.HandleInitialize(ctx, in) } +func addOpCancelHarvestJobValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCancelHarvestJob{}, middleware.After) +} + func addOpCreateChannelGroupValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateChannelGroup{}, middleware.After) } @@ -478,6 +562,10 @@ func addOpCreateChannelValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateChannel{}, middleware.After) } +func addOpCreateHarvestJobValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateHarvestJob{}, middleware.After) +} + func addOpCreateOriginEndpointValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateOriginEndpoint{}, middleware.After) } @@ -514,6 +602,10 @@ func addOpGetChannelPolicyValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetChannelPolicy{}, middleware.After) } +func addOpGetHarvestJobValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetHarvestJob{}, middleware.After) +} + func addOpGetOriginEndpointValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetOriginEndpoint{}, middleware.After) } @@ -526,6 +618,10 @@ func addOpListChannelsValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpListChannels{}, middleware.After) } +func addOpListHarvestJobsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListHarvestJobs{}, middleware.After) +} + func addOpListOriginEndpointsValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpListOriginEndpoints{}, middleware.After) } @@ -668,6 +764,25 @@ func validateCreateLowLatencyHlsManifests(v []types.CreateLowLatencyHlsManifestC } } +func validateDestination(v *types.Destination) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Destination"} + if v.S3Destination == nil { + invalidParams.Add(smithy.NewErrParamRequired("S3Destination")) + } else if v.S3Destination != nil { + if err := validateS3DestinationConfig(v.S3Destination); err != nil { + invalidParams.AddNested("S3Destination", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateEncryption(v *types.Encryption) error { if v == nil { return nil @@ -708,6 +823,165 @@ func validateEncryptionContractConfiguration(v *types.EncryptionContractConfigur } } +func validateHarvestedDashManifest(v *types.HarvestedDashManifest) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: 
"HarvestedDashManifest"} + if v.ManifestName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ManifestName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateHarvestedDashManifestsList(v []types.HarvestedDashManifest) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "HarvestedDashManifestsList"} + for i := range v { + if err := validateHarvestedDashManifest(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateHarvestedHlsManifest(v *types.HarvestedHlsManifest) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "HarvestedHlsManifest"} + if v.ManifestName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ManifestName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateHarvestedHlsManifestsList(v []types.HarvestedHlsManifest) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "HarvestedHlsManifestsList"} + for i := range v { + if err := validateHarvestedHlsManifest(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateHarvestedLowLatencyHlsManifest(v *types.HarvestedLowLatencyHlsManifest) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "HarvestedLowLatencyHlsManifest"} + if v.ManifestName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ManifestName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateHarvestedLowLatencyHlsManifestsList(v []types.HarvestedLowLatencyHlsManifest) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "HarvestedLowLatencyHlsManifestsList"} + for i := range v { + if err := validateHarvestedLowLatencyHlsManifest(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateHarvestedManifests(v *types.HarvestedManifests) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "HarvestedManifests"} + if v.HlsManifests != nil { + if err := validateHarvestedHlsManifestsList(v.HlsManifests); err != nil { + invalidParams.AddNested("HlsManifests", err.(smithy.InvalidParamsError)) + } + } + if v.DashManifests != nil { + if err := validateHarvestedDashManifestsList(v.DashManifests); err != nil { + invalidParams.AddNested("DashManifests", err.(smithy.InvalidParamsError)) + } + } + if v.LowLatencyHlsManifests != nil { + if err := validateHarvestedLowLatencyHlsManifestsList(v.LowLatencyHlsManifests); err != nil { + invalidParams.AddNested("LowLatencyHlsManifests", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateHarvesterScheduleConfiguration(v *types.HarvesterScheduleConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "HarvesterScheduleConfiguration"} + if v.StartTime == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("StartTime")) + } + if v.EndTime == nil { + invalidParams.Add(smithy.NewErrParamRequired("EndTime")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateS3DestinationConfig(v *types.S3DestinationConfig) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "S3DestinationConfig"} + if v.BucketName == nil { + invalidParams.Add(smithy.NewErrParamRequired("BucketName")) + } + if v.DestinationPath == nil { + invalidParams.Add(smithy.NewErrParamRequired("DestinationPath")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateSegment(v *types.Segment) error { if v == nil { return nil @@ -771,6 +1045,30 @@ func validateStartTag(v *types.StartTag) error { } } +func validateOpCancelHarvestJobInput(v *CancelHarvestJobInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CancelHarvestJobInput"} + if v.ChannelGroupName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ChannelGroupName")) + } + if v.ChannelName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ChannelName")) + } + if v.OriginEndpointName == nil { + invalidParams.Add(smithy.NewErrParamRequired("OriginEndpointName")) + } + if v.HarvestJobName == nil { + invalidParams.Add(smithy.NewErrParamRequired("HarvestJobName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpCreateChannelGroupInput(v *CreateChannelGroupInput) error { if v == nil { return nil @@ -804,6 +1102,48 @@ func validateOpCreateChannelInput(v *CreateChannelInput) error { } } +func validateOpCreateHarvestJobInput(v *CreateHarvestJobInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateHarvestJobInput"} + if v.ChannelGroupName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ChannelGroupName")) + } + if v.ChannelName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ChannelName")) + } + if v.OriginEndpointName == nil { + invalidParams.Add(smithy.NewErrParamRequired("OriginEndpointName")) + } + if v.HarvestedManifests == nil { + invalidParams.Add(smithy.NewErrParamRequired("HarvestedManifests")) + } else if v.HarvestedManifests != nil { + if err := validateHarvestedManifests(v.HarvestedManifests); err != nil { + invalidParams.AddNested("HarvestedManifests", err.(smithy.InvalidParamsError)) + } + } + if v.ScheduleConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("ScheduleConfiguration")) + } else if v.ScheduleConfiguration != nil { + if err := validateHarvesterScheduleConfiguration(v.ScheduleConfiguration); err != nil { + invalidParams.AddNested("ScheduleConfiguration", err.(smithy.InvalidParamsError)) + } + } + if v.Destination == nil { + invalidParams.Add(smithy.NewErrParamRequired("Destination")) + } else if v.Destination != nil { + if err := validateDestination(v.Destination); err != nil { + invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpCreateOriginEndpointInput(v *CreateOriginEndpointInput) error { if v == nil { return nil @@ -992,6 +1332,30 @@ func validateOpGetChannelPolicyInput(v *GetChannelPolicyInput) error { } } +func validateOpGetHarvestJobInput(v *GetHarvestJobInput) error { + if v == nil { + return nil + } + invalidParams := 
smithy.InvalidParamsError{Context: "GetHarvestJobInput"} + if v.ChannelGroupName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ChannelGroupName")) + } + if v.ChannelName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ChannelName")) + } + if v.OriginEndpointName == nil { + invalidParams.Add(smithy.NewErrParamRequired("OriginEndpointName")) + } + if v.HarvestJobName == nil { + invalidParams.Add(smithy.NewErrParamRequired("HarvestJobName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpGetOriginEndpointInput(v *GetOriginEndpointInput) error { if v == nil { return nil @@ -1049,6 +1413,21 @@ func validateOpListChannelsInput(v *ListChannelsInput) error { } } +func validateOpListHarvestJobsInput(v *ListHarvestJobsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListHarvestJobsInput"} + if v.ChannelGroupName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ChannelGroupName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpListOriginEndpointsInput(v *ListOriginEndpointsInput) error { if v == nil { return nil diff --git a/service/opensearch/deserializers.go b/service/opensearch/deserializers.go index cbf4a83db92..fadb61378f5 100644 --- a/service/opensearch/deserializers.go +++ b/service/opensearch/deserializers.go @@ -12893,6 +12893,11 @@ func awsRestjson1_deserializeDocumentClusterConfig(v **types.ClusterConfig, valu sv.MultiAZWithStandbyEnabled = ptr.Bool(jtv) } + case "NodeOptions": + if err := awsRestjson1_deserializeDocumentNodeOptionsList(&sv.NodeOptions, value); err != nil { + return err + } + case "WarmCount": if value != nil { jtv, ok := value.(json.Number) @@ -16336,6 +16341,147 @@ func awsRestjson1_deserializeDocumentNaturalLanguageQueryGenerationOptionsOutput return nil } +func awsRestjson1_deserializeDocumentNodeConfig(v **types.NodeConfig, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NodeConfig + if *v == nil { + sv = &types.NodeConfig{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Count": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected IntegerClass to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Count = ptr.Int32(int32(i64)) + } + + case "Enabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.Enabled = ptr.Bool(jtv) + } + + case "Type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected OpenSearchPartitionInstanceType to be of type string, got %T instead", value) + } + sv.Type = types.OpenSearchPartitionInstanceType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentNodeOption(v **types.NodeOption, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NodeOption + if *v == nil { + sv = 
&types.NodeOption{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NodeConfig": + if err := awsRestjson1_deserializeDocumentNodeConfig(&sv.NodeConfig, value); err != nil { + return err + } + + case "NodeType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NodeOptionsNodeType to be of type string, got %T instead", value) + } + sv.NodeType = types.NodeOptionsNodeType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentNodeOptionsList(v *[]types.NodeOption, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.NodeOption + if *v == nil { + cv = []types.NodeOption{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.NodeOption + destAddr := &col + if err := awsRestjson1_deserializeDocumentNodeOption(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsRestjson1_deserializeDocumentNodeToNodeEncryptionOptions(v **types.NodeToNodeEncryptionOptions, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/opensearch/serializers.go b/service/opensearch/serializers.go index 6f3fe26cf12..7c0cc10054d 100644 --- a/service/opensearch/serializers.go +++ b/service/opensearch/serializers.go @@ -5696,6 +5696,13 @@ func awsRestjson1_serializeDocumentClusterConfig(v *types.ClusterConfig, value s ok.Boolean(*v.MultiAZWithStandbyEnabled) } + if v.NodeOptions != nil { + ok := object.Key("NodeOptions") + if err := awsRestjson1_serializeDocumentNodeOptionsList(v.NodeOptions, ok); err != nil { + return err + } + } + if v.WarmCount != nil { ok := object.Key("WarmCount") ok.Integer(*v.WarmCount) @@ -6104,6 +6111,60 @@ func awsRestjson1_serializeDocumentNaturalLanguageQueryGenerationOptionsInput(v return nil } +func awsRestjson1_serializeDocumentNodeConfig(v *types.NodeConfig, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Count != nil { + ok := object.Key("Count") + ok.Integer(*v.Count) + } + + if v.Enabled != nil { + ok := object.Key("Enabled") + ok.Boolean(*v.Enabled) + } + + if len(v.Type) > 0 { + ok := object.Key("Type") + ok.String(string(v.Type)) + } + + return nil +} + +func awsRestjson1_serializeDocumentNodeOption(v *types.NodeOption, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.NodeConfig != nil { + ok := object.Key("NodeConfig") + if err := awsRestjson1_serializeDocumentNodeConfig(v.NodeConfig, ok); err != nil { + return err + } + } + + if len(v.NodeType) > 0 { + ok := object.Key("NodeType") + ok.String(string(v.NodeType)) + } + + return nil +} + +func awsRestjson1_serializeDocumentNodeOptionsList(v []types.NodeOption, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentNodeOption(&v[i], av); err != nil { + return err + } + } + return nil +} + func awsRestjson1_serializeDocumentNodeToNodeEncryptionOptions(v *types.NodeToNodeEncryptionOptions, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/service/opensearch/types/enums.go b/service/opensearch/types/enums.go 
index f659df614ac..31f957fa3ea 100644 --- a/service/opensearch/types/enums.go +++ b/service/opensearch/types/enums.go @@ -602,6 +602,23 @@ func (NaturalLanguageQueryGenerationDesiredState) Values() []NaturalLanguageQuer } } +type NodeOptionsNodeType string + +// Enum values for NodeOptionsNodeType +const ( + NodeOptionsNodeTypeCoordinator NodeOptionsNodeType = "coordinator" +) + +// Values returns all known values for NodeOptionsNodeType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (NodeOptionsNodeType) Values() []NodeOptionsNodeType { + return []NodeOptionsNodeType{ + "coordinator", + } +} + type NodeStatus string // Enum values for NodeStatus diff --git a/service/opensearch/types/types.go b/service/opensearch/types/types.go index 94477c0deb4..966da2a4530 100644 --- a/service/opensearch/types/types.go +++ b/service/opensearch/types/types.go @@ -556,6 +556,9 @@ type ClusterConfig struct { // [Configuring a multi-AZ domain in Amazon OpenSearch Service]: https://docs.aws.amazon.com/opensearch-service/latest/developerguide/managedomains-multiaz.html MultiAZWithStandbyEnabled *bool + // List of node options for the domain. + NodeOptions []NodeOption + // The number of warm nodes in the cluster. WarmCount *int32 @@ -1573,6 +1576,33 @@ type NaturalLanguageQueryGenerationOptionsOutput struct { noSmithyDocumentSerde } +// Container for specifying configuration of any node type. +type NodeConfig struct { + + // The number of nodes of a particular node type in the cluster. + Count *int32 + + // A boolean that indicates whether a particular node type is enabled or not. + Enabled *bool + + // The instance type of a particular node type in the cluster. + Type OpenSearchPartitionInstanceType + + noSmithyDocumentSerde +} + +// Container for specifying node type. +type NodeOption struct { + + // Container for specifying configuration of any node type. + NodeConfig *NodeConfig + + // Container for node type like coordinating. + NodeType NodeOptionsNodeType + + noSmithyDocumentSerde +} + // Enables or disables node-to-node encryption. For more information, see [Node-to-node encryption for Amazon OpenSearch Service]. // // [Node-to-node encryption for Amazon OpenSearch Service]: https://docs.aws.amazon.com/opensearch-service/latest/developerguide/ntn.html diff --git a/service/rds/api_op_ApplyPendingMaintenanceAction.go b/service/rds/api_op_ApplyPendingMaintenanceAction.go index a3fe74dd76a..a8410f476ca 100644 --- a/service/rds/api_op_ApplyPendingMaintenanceAction.go +++ b/service/rds/api_op_ApplyPendingMaintenanceAction.go @@ -32,8 +32,22 @@ type ApplyPendingMaintenanceActionInput struct { // The pending maintenance action to apply to this resource. // - // Valid Values: system-update , db-upgrade , hardware-maintenance , - // ca-certificate-rotation + // Valid Values: + // + // - ca-certificate-rotation + // + // - db-upgrade + // + // - hardware-maintenance + // + // - os-upgrade + // + // - system-update + // + // For more information about these actions, see [Maintenance actions for Amazon Aurora] or [Maintenance actions for Amazon RDS]. 
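The NodeOptions list added to ClusterConfig earlier in this change is how dedicated coordinator nodes are requested for an OpenSearch domain. Below is a rough sketch of passing it at domain creation; the domain name, node counts, and instance types are placeholders, and the instance type is written as a raw string conversion rather than a named enum constant to avoid assuming constant names.

// Illustrative sketch only: requesting dedicated coordinator nodes through the
// new NodeOptions field on ClusterConfig. Domain name, counts, and instance
// types are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/opensearch"
	"github.com/aws/aws-sdk-go-v2/service/opensearch/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	svc := opensearch.NewFromConfig(cfg)

	out, err := svc.CreateDomain(context.TODO(), &opensearch.CreateDomainInput{
		DomainName: aws.String("example-domain"),
		ClusterConfig: &types.ClusterConfig{
			InstanceType:  types.OpenSearchPartitionInstanceType("r6g.large.search"),
			InstanceCount: aws.Int32(3),
			// New in this change: a dedicated coordinator node pool.
			NodeOptions: []types.NodeOption{
				{
					NodeType: types.NodeOptionsNodeTypeCoordinator,
					NodeConfig: &types.NodeConfig{
						Enabled: aws.Bool(true),
						Count:   aws.Int32(2),
						Type:    types.OpenSearchPartitionInstanceType("r6g.large.search"),
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("create domain response: %+v\n", out)
}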
+ // + // [Maintenance actions for Amazon RDS]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#maintenance-actions-rds + // [Maintenance actions for Amazon Aurora]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#maintenance-actions-aurora // // This member is required. ApplyAction *string diff --git a/service/rds/api_op_CreateDBCluster.go b/service/rds/api_op_CreateDBCluster.go index 3c407031cba..4897c02913f 100644 --- a/service/rds/api_op_CreateDBCluster.go +++ b/service/rds/api_op_CreateDBCluster.go @@ -319,6 +319,8 @@ type CreateDBClusterInput struct { // Limitless Database to create a DB shard group. // // Valid for: Aurora DB clusters only + // + // This setting is no longer used. Instead use the ClusterScalabilityType setting. EnableLimitlessDatabase *bool // Specifies whether read replicas can forward write operations to the writer DB diff --git a/service/rds/api_op_ModifyDBCluster.go b/service/rds/api_op_ModifyDBCluster.go index dae5a4de44b..015ce2c55c5 100644 --- a/service/rds/api_op_ModifyDBCluster.go +++ b/service/rds/api_op_ModifyDBCluster.go @@ -280,6 +280,9 @@ type ModifyDBClusterInput struct { // Limitless Database to create a DB shard group. // // Valid for: Aurora DB clusters only + // + // This setting is no longer used. Instead use the ClusterScalabilityType setting + // when you create your Aurora Limitless Database DB cluster. EnableLimitlessDatabase *bool // Specifies whether read replicas can forward write operations to the writer DB diff --git a/service/rds/api_op_RestoreDBClusterFromSnapshot.go b/service/rds/api_op_RestoreDBClusterFromSnapshot.go index 34fb977dbd0..7719803c435 100644 --- a/service/rds/api_op_RestoreDBClusterFromSnapshot.go +++ b/service/rds/api_op_RestoreDBClusterFromSnapshot.go @@ -231,6 +231,9 @@ type RestoreDBClusterFromSnapshotInput struct { // [IAM database authentication for MariaDB, MySQL, and PostgreSQL]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html EnableIAMDatabaseAuthentication *bool + // Specifies whether to turn on Performance Insights for the DB cluster. + EnablePerformanceInsights *bool + // The life cycle type for this DB cluster. // // By default, this value is set to open-source-rds-extended-support , which @@ -353,6 +356,26 @@ type RestoreDBClusterFromSnapshotInput struct { // Valid for: Aurora DB clusters and Multi-AZ DB clusters KmsKeyId *string + // The interval, in seconds, between points when Enhanced Monitoring metrics are + // collected for the DB cluster. To turn off collecting Enhanced Monitoring + // metrics, specify 0 . + // + // If MonitoringRoleArn is specified, also set MonitoringInterval to a value other + // than 0 . + // + // Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 + // + // Default: 0 + MonitoringInterval *int32 + + // The Amazon Resource Name (ARN) for the IAM role that permits RDS to send + // Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is + // arn:aws:iam:123456789012:role/emaccess . + // + // If MonitoringInterval is set to a value other than 0 , supply a + // MonitoringRoleArn value. + MonitoringRoleArn *string + // The network type of the DB cluster. // // Valid Values: @@ -377,6 +400,35 @@ type RestoreDBClusterFromSnapshotInput struct { // DB clusters are associated with a default option group that can't be modified. OptionGroupName *string + // The Amazon Web Services KMS key identifier for encryption of Performance + // Insights data. 
+ // + // The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, + // or alias name for the KMS key. + // + // If you don't specify a value for PerformanceInsightsKMSKeyId , then Amazon RDS + // uses your default KMS key. There is a default KMS key for your Amazon Web + // Services account. Your Amazon Web Services account has a different default KMS + // key for each Amazon Web Services Region. + PerformanceInsightsKMSKeyId *string + + // The number of days to retain Performance Insights data. + // + // Valid Values: + // + // - 7 + // + // - month * 31, where month is a number of months from 1-23. Examples: 93 (3 + // months * 31), 341 (11 months * 31), 589 (19 months * 31) + // + // - 731 + // + // Default: 7 days + // + // If you specify a retention period that isn't valid, such as 94 , Amazon RDS + // issues an error. + PerformanceInsightsRetentionPeriod *int32 + // The port number on which the new DB cluster accepts connections. // // Constraints: This value must be 1150-65535 diff --git a/service/rds/api_op_RestoreDBClusterToPointInTime.go b/service/rds/api_op_RestoreDBClusterToPointInTime.go index 269f0964a73..40102b17c20 100644 --- a/service/rds/api_op_RestoreDBClusterToPointInTime.go +++ b/service/rds/api_op_RestoreDBClusterToPointInTime.go @@ -192,6 +192,9 @@ type RestoreDBClusterToPointInTimeInput struct { // [IAM database authentication for MariaDB, MySQL, and PostgreSQL]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html EnableIAMDatabaseAuthentication *bool + // Specifies whether to turn on Performance Insights for the DB cluster. + EnablePerformanceInsights *bool + // The life cycle type for this DB cluster. // // By default, this value is set to open-source-rds-extended-support , which @@ -270,6 +273,26 @@ type RestoreDBClusterToPointInTimeInput struct { // Valid for: Aurora DB clusters and Multi-AZ DB clusters KmsKeyId *string + // The interval, in seconds, between points when Enhanced Monitoring metrics are + // collected for the DB cluster. To turn off collecting Enhanced Monitoring + // metrics, specify 0 . + // + // If MonitoringRoleArn is specified, also set MonitoringInterval to a value other + // than 0 . + // + // Valid Values: 0 | 1 | 5 | 10 | 15 | 30 | 60 + // + // Default: 0 + MonitoringInterval *int32 + + // The Amazon Resource Name (ARN) for the IAM role that permits RDS to send + // Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is + // arn:aws:iam:123456789012:role/emaccess . + // + // If MonitoringInterval is set to a value other than 0 , supply a + // MonitoringRoleArn value. + MonitoringRoleArn *string + // The network type of the DB cluster. // // Valid Values: @@ -294,6 +317,35 @@ type RestoreDBClusterToPointInTimeInput struct { // DB clusters are associated with a default option group that can't be modified. OptionGroupName *string + // The Amazon Web Services KMS key identifier for encryption of Performance + // Insights data. + // + // The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, + // or alias name for the KMS key. + // + // If you don't specify a value for PerformanceInsightsKMSKeyId , then Amazon RDS + // uses your default KMS key. There is a default KMS key for your Amazon Web + // Services account. Your Amazon Web Services account has a different default KMS + // key for each Amazon Web Services Region. + PerformanceInsightsKMSKeyId *string + + // The number of days to retain Performance Insights data. 
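The RestoreDBClusterFromSnapshot fields above show how Enhanced Monitoring and Performance Insights can now be enabled while restoring an Aurora Limitless Database DB cluster. A minimal sketch follows; every identifier, the snapshot name, and the monitoring role ARN are placeholders, and only the required restore fields plus the new monitoring fields are set.

// Illustrative sketch only: restoring a DB cluster from a snapshot with the
// new Enhanced Monitoring and Performance Insights fields. All identifiers,
// the snapshot name, and the IAM role ARN are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/rds"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	svc := rds.NewFromConfig(cfg)

	out, err := svc.RestoreDBClusterFromSnapshot(context.TODO(), &rds.RestoreDBClusterFromSnapshotInput{
		DBClusterIdentifier: aws.String("example-restored-cluster"),
		SnapshotIdentifier:  aws.String("example-cluster-snapshot"),
		Engine:              aws.String("aurora-postgresql"),

		// New in this change: Enhanced Monitoring for the restored cluster.
		MonitoringInterval: aws.Int32(30),
		MonitoringRoleArn:  aws.String("arn:aws:iam::123456789012:role/emaccess"),

		// New in this change: Performance Insights for the restored cluster.
		EnablePerformanceInsights:          aws.Bool(true),
		PerformanceInsightsRetentionPeriod: aws.Int32(7),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("restored cluster: %+v\n", out)
}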
+ // + // Valid Values: + // + // - 7 + // + // - month * 31, where month is a number of months from 1-23. Examples: 93 (3 + // months * 31), 341 (11 months * 31), 589 (19 months * 31) + // + // - 731 + // + // Default: 7 days + // + // If you specify a retention period that isn't valid, such as 94 , Amazon RDS + // issues an error. + PerformanceInsightsRetentionPeriod *int32 + // The port number on which the new DB cluster accepts connections. // // Constraints: A value from 1150-65535 . diff --git a/service/rds/serializers.go b/service/rds/serializers.go index 3db265fa59a..b0752c869de 100644 --- a/service/rds/serializers.go +++ b/service/rds/serializers.go @@ -17703,6 +17703,11 @@ func awsAwsquery_serializeOpDocumentRestoreDBClusterFromSnapshotInput(v *Restore objectKey.Boolean(*v.EnableIAMDatabaseAuthentication) } + if v.EnablePerformanceInsights != nil { + objectKey := object.Key("EnablePerformanceInsights") + objectKey.Boolean(*v.EnablePerformanceInsights) + } + if v.Engine != nil { objectKey := object.Key("Engine") objectKey.String(*v.Engine) @@ -17733,6 +17738,16 @@ func awsAwsquery_serializeOpDocumentRestoreDBClusterFromSnapshotInput(v *Restore objectKey.String(*v.KmsKeyId) } + if v.MonitoringInterval != nil { + objectKey := object.Key("MonitoringInterval") + objectKey.Integer(*v.MonitoringInterval) + } + + if v.MonitoringRoleArn != nil { + objectKey := object.Key("MonitoringRoleArn") + objectKey.String(*v.MonitoringRoleArn) + } + if v.NetworkType != nil { objectKey := object.Key("NetworkType") objectKey.String(*v.NetworkType) @@ -17743,6 +17758,16 @@ func awsAwsquery_serializeOpDocumentRestoreDBClusterFromSnapshotInput(v *Restore objectKey.String(*v.OptionGroupName) } + if v.PerformanceInsightsKMSKeyId != nil { + objectKey := object.Key("PerformanceInsightsKMSKeyId") + objectKey.String(*v.PerformanceInsightsKMSKeyId) + } + + if v.PerformanceInsightsRetentionPeriod != nil { + objectKey := object.Key("PerformanceInsightsRetentionPeriod") + objectKey.Integer(*v.PerformanceInsightsRetentionPeriod) + } + if v.Port != nil { objectKey := object.Key("Port") objectKey.Integer(*v.Port) @@ -17862,6 +17887,11 @@ func awsAwsquery_serializeOpDocumentRestoreDBClusterToPointInTimeInput(v *Restor objectKey.Boolean(*v.EnableIAMDatabaseAuthentication) } + if v.EnablePerformanceInsights != nil { + objectKey := object.Key("EnablePerformanceInsights") + objectKey.Boolean(*v.EnablePerformanceInsights) + } + if v.EngineLifecycleSupport != nil { objectKey := object.Key("EngineLifecycleSupport") objectKey.String(*v.EngineLifecycleSupport) @@ -17882,6 +17912,16 @@ func awsAwsquery_serializeOpDocumentRestoreDBClusterToPointInTimeInput(v *Restor objectKey.String(*v.KmsKeyId) } + if v.MonitoringInterval != nil { + objectKey := object.Key("MonitoringInterval") + objectKey.Integer(*v.MonitoringInterval) + } + + if v.MonitoringRoleArn != nil { + objectKey := object.Key("MonitoringRoleArn") + objectKey.String(*v.MonitoringRoleArn) + } + if v.NetworkType != nil { objectKey := object.Key("NetworkType") objectKey.String(*v.NetworkType) @@ -17892,6 +17932,16 @@ func awsAwsquery_serializeOpDocumentRestoreDBClusterToPointInTimeInput(v *Restor objectKey.String(*v.OptionGroupName) } + if v.PerformanceInsightsKMSKeyId != nil { + objectKey := object.Key("PerformanceInsightsKMSKeyId") + objectKey.String(*v.PerformanceInsightsKMSKeyId) + } + + if v.PerformanceInsightsRetentionPeriod != nil { + objectKey := object.Key("PerformanceInsightsRetentionPeriod") + objectKey.Integer(*v.PerformanceInsightsRetentionPeriod) + } + if 
v.Port != nil { objectKey := object.Key("Port") objectKey.Integer(*v.Port) diff --git a/service/rds/types/types.go b/service/rds/types/types.go index 7d45e48da5f..788994f7c28 100644 --- a/service/rds/types/types.go +++ b/service/rds/types/types.go @@ -4202,10 +4202,23 @@ type PendingMaintenanceAction struct { // // For more information about maintenance actions, see [Maintaining a DB instance]. // - // Valid Values: system-update | db-upgrade | hardware-maintenance | - // ca-certificate-rotation + // Valid Values: + // + // - ca-certificate-rotation + // + // - db-upgrade + // + // - hardware-maintenance + // + // - os-upgrade + // + // - system-update + // + // For more information about these actions, see [Maintenance actions for Amazon Aurora] or [Maintenance actions for Amazon RDS]. // + // [Maintenance actions for Amazon RDS]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#maintenance-actions-rds // [Maintaining a DB instance]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html + // [Maintenance actions for Amazon Aurora]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#maintenance-actions-aurora Action *string // The date of the maintenance window when the action is applied. The maintenance diff --git a/service/storagegateway/api_op_ActivateGateway.go b/service/storagegateway/api_op_ActivateGateway.go index 98157942505..30862a8c3b9 100644 --- a/service/storagegateway/api_op_ActivateGateway.go +++ b/service/storagegateway/api_op_ActivateGateway.go @@ -99,7 +99,13 @@ type ActivateGatewayInput struct { // critical to all later functions of the gateway and cannot be changed after // activation. The default value is CACHED . // + // Amazon FSx File Gateway is no longer available to new customers. Existing + // customers of FSx File Gateway can continue to use the service normally. For + // capabilities similar to FSx File Gateway, visit [this blog post]. + // // Valid Values: STORED | CACHED | VTL | FILE_S3 | FILE_FSX_SMB + // + // [this blog post]: https://aws.amazon.com/blogs/storage/switch-your-file-share-access-from-amazon-fsx-file-gateway-to-amazon-fsx-for-windows-file-server/ GatewayType *string // The value that indicates the type of medium changer to use for tape gateway. diff --git a/service/storagegateway/api_op_CreateNFSFileShare.go b/service/storagegateway/api_op_CreateNFSFileShare.go index 16a660c5b23..bf48304c355 100644 --- a/service/storagegateway/api_op_CreateNFSFileShare.go +++ b/service/storagegateway/api_op_CreateNFSFileShare.go @@ -65,7 +65,7 @@ type CreateNFSFileShareInput struct { // // Bucket ARN: // - // arn:aws:s3:::my-bucket/prefix/ + // arn:aws:s3:::amzn-s3-demo-bucket/prefix/ // // Access point ARN: // @@ -130,6 +130,9 @@ type CreateNFSFileShareInput struct { // // FileShareName must be set if an S3 prefix name is set in LocationARN , or if an // access point or access point alias is used. + // + // A valid NFS file share name can only contain the following characters: a - z , A + // - Z , 0 - 9 , - , . , and _ . 
FileShareName *string // A value that enables guessing of the MIME type for uploaded objects based on diff --git a/service/storagegateway/api_op_CreateSMBFileShare.go b/service/storagegateway/api_op_CreateSMBFileShare.go index b7f6f33cff3..e52382124fd 100644 --- a/service/storagegateway/api_op_CreateSMBFileShare.go +++ b/service/storagegateway/api_op_CreateSMBFileShare.go @@ -64,7 +64,7 @@ type CreateSMBFileShareInput struct { // // Bucket ARN: // - // arn:aws:s3:::my-bucket/prefix/ + // arn:aws:s3:::amzn-s3-demo-bucket/prefix/ // // Access point ARN: // @@ -149,6 +149,9 @@ type CreateSMBFileShareInput struct { // // FileShareName must be set if an S3 prefix name is set in LocationARN , or if an // access point or access point alias is used. + // + // A valid SMB file share name cannot contain the following characters: [ , ] , # , + // ; , < , > , : , " , \ , / , | , ? , * , + , or ASCII control characters 1-31 . FileShareName *string // A value that enables guessing of the MIME type for uploaded objects based on diff --git a/service/storagegateway/api_op_DescribeGatewayInformation.go b/service/storagegateway/api_op_DescribeGatewayInformation.go index 1b2601f7b82..dc6fc641064 100644 --- a/service/storagegateway/api_op_DescribeGatewayInformation.go +++ b/service/storagegateway/api_op_DescribeGatewayInformation.go @@ -90,6 +90,12 @@ type DescribeGatewayInformationOutput struct { GatewayTimezone *string // The type of the gateway. + // + // Amazon FSx File Gateway is no longer available to new customers. Existing + // customers of FSx File Gateway can continue to use the service normally. For + // capabilities similar to FSx File Gateway, visit [this blog post]. + // + // [this blog post]: https://aws.amazon.com/blogs/storage/switch-your-file-share-access-from-amazon-fsx-file-gateway-to-amazon-fsx-for-windows-file-server/ GatewayType *string // The type of hardware or software platform on which the gateway is running. diff --git a/service/storagegateway/api_op_UpdateNFSFileShare.go b/service/storagegateway/api_op_UpdateNFSFileShare.go index 949411e06ae..7056d14f4e2 100644 --- a/service/storagegateway/api_op_UpdateNFSFileShare.go +++ b/service/storagegateway/api_op_UpdateNFSFileShare.go @@ -84,6 +84,9 @@ type UpdateNFSFileShareInput struct { // // FileShareName must be set if an S3 prefix name is set in LocationARN , or if an // access point or access point alias is used. + // + // A valid NFS file share name can only contain the following characters: a - z , A + // - Z , 0 - 9 , - , . , and _ . FileShareName *string // A value that enables guessing of the MIME type for uploaded objects based on diff --git a/service/storagegateway/api_op_UpdateSMBFileShare.go b/service/storagegateway/api_op_UpdateSMBFileShare.go index 63d18252541..176eb4322fc 100644 --- a/service/storagegateway/api_op_UpdateSMBFileShare.go +++ b/service/storagegateway/api_op_UpdateSMBFileShare.go @@ -94,6 +94,9 @@ type UpdateSMBFileShareInput struct { // // FileShareName must be set if an S3 prefix name is set in LocationARN , or if an // access point or access point alias is used. + // + // A valid SMB file share name cannot contain the following characters: [ , ] , # , + // ; , < , > , : , " , \ , / , | , ? , * , + , or ASCII control characters 1-31 . 
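The NFS and SMB file share naming rules added to these doc comments are easy to check client-side before calling the API. The standalone sketch below does that; its regular expressions simply transcribe the documented character rules and are illustrative helpers, not part of the SDK.

// Standalone sketch: pre-flight checks for the documented file share name
// rules. The regular expressions transcribe the character rules from the doc
// comments above; they are not part of the Storage Gateway SDK.
package main

import (
	"fmt"
	"regexp"
)

// NFS share names may contain only a-z, A-Z, 0-9, '-', '.', and '_'.
var nfsShareName = regexp.MustCompile(`^[a-zA-Z0-9._-]+$`)

// SMB share names must not contain the listed special characters or ASCII
// control characters 1-31; anything else is accepted by this check.
var smbForbidden = regexp.MustCompile(`[\[\]#;<>:"\\/|?*+\x01-\x1f]`)

func validNFSShareName(name string) bool {
	return nfsShareName.MatchString(name)
}

func validSMBShareName(name string) bool {
	return name != "" && !smbForbidden.MatchString(name)
}

func main() {
	fmt.Println(validNFSShareName("my-share_01")) // true
	fmt.Println(validNFSShareName("my share"))    // false: space not allowed
	fmt.Println(validSMBShareName("team-share"))  // true
	fmt.Println(validSMBShareName(`bad|name`))    // false: '|' is forbidden
}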
FileShareName *string // A value that enables guessing of the MIME type for uploaded objects based on diff --git a/service/storagegateway/doc.go b/service/storagegateway/doc.go index a79abef3116..fd3a692588c 100644 --- a/service/storagegateway/doc.go +++ b/service/storagegateway/doc.go @@ -5,6 +5,10 @@ // // # Storage Gateway Service // +// Amazon FSx File Gateway is no longer available to new customers. Existing +// customers of FSx File Gateway can continue to use the service normally. For +// capabilities similar to FSx File Gateway, visit [this blog post]. +// // Storage Gateway is the service that connects an on-premises software appliance // with cloud-based storage to provide seamless and secure integration between an // organization's on-premises IT environment and the Amazon Web Services storage @@ -62,6 +66,7 @@ // [Storage Gateway endpoints and quotas]: https://docs.aws.amazon.com/general/latest/gr/sg.html // [Signing requests]: https://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayAPI.html#AWSStorageGatewaySigningRequests // [Error responses]: https://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayAPI.html#APIErrorResponses +// [this blog post]: https://aws.amazon.com/blogs/storage/switch-your-file-share-access-from-amazon-fsx-file-gateway-to-amazon-fsx-for-windows-file-server/ // [Operations in Storage Gateway]: https://docs.aws.amazon.com/storagegateway/latest/APIReference/API_Operations.html // [Announcement: Heads-up – Longer Storage Gateway volume and snapshot IDs coming in 2016]: http://forums.aws.amazon.com/ann.jspa?annID=3557 // [Longer EC2 and EBS resource IDs]: http://aws.amazon.com/ec2/faqs/#longer-ids diff --git a/service/storagegateway/internal/endpoints/endpoints.go b/service/storagegateway/internal/endpoints/endpoints.go index 6592a4265b5..ad3ea371f92 100644 --- a/service/storagegateway/internal/endpoints/endpoints.go +++ b/service/storagegateway/internal/endpoints/endpoints.go @@ -172,6 +172,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ap-southeast-4", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{}, diff --git a/service/storagegateway/types/types.go b/service/storagegateway/types/types.go index f810412dd45..26e08502d33 100644 --- a/service/storagegateway/types/types.go +++ b/service/storagegateway/types/types.go @@ -70,12 +70,13 @@ type AutomaticTapeCreationRule struct { noSmithyDocumentSerde } -// Describes a bandwidth rate limit interval for a gateway. A bandwidth rate +// Describes a bandwidth rate limit interval for a gateway. A bandwidth rate limit +// schedule consists of one or more bandwidth rate limit intervals. A bandwidth +// rate limit interval defines a period of time on one or more days of the week, +// during which bandwidth rate limits are specified for uploading, downloading, or +// both. // -// limit schedule consists of one or more bandwidth rate limit intervals. A -// bandwidth rate limit interval defines a period of time on one or more days of -// the week, during which bandwidth rate limits are specified for uploading, -// downloading, or both. +// FSx File Gateway does not support this feature. 
type BandwidthRateLimitInterval struct { // The days of the week component of the bandwidth rate limit interval, @@ -113,6 +114,8 @@ type BandwidthRateLimitInterval struct { // The average download rate limit component of the bandwidth rate limit // interval, in bits per second. This field does not appear in the response if the // download rate limit is not set. + // + // S3 File Gateway does not support this feature. AverageDownloadRateLimitInBitsPerSec *int64 // The average upload rate limit component of the bandwidth rate limit interval, @@ -121,7 +124,7 @@ type BandwidthRateLimitInterval struct { // // For Tape Gateway and Volume Gateway, the minimum value is 51200 . // - // For S3 File Gateway and FSx File Gateway, the minimum value is 104857600 . + // This field is required for S3 File Gateway, and the minimum value is 104857600 . AverageUploadRateLimitInBitsPerSec *int64 noSmithyDocumentSerde @@ -440,6 +443,12 @@ type GatewayInfo struct { GatewayOperationalState *string // The type of the gateway. + // + // Amazon FSx File Gateway is no longer available to new customers. Existing + // customers of FSx File Gateway can continue to use the service normally. For + // capabilities similar to FSx File Gateway, visit [this blog post]. + // + // [this blog post]: https://aws.amazon.com/blogs/storage/switch-your-file-share-access-from-amazon-fsx-file-gateway-to-amazon-fsx-for-windows-file-server/ GatewayType *string // The type of hardware or software platform on which the gateway is running. @@ -604,7 +613,7 @@ type NFSFileShareInfo struct { // // Bucket ARN: // - // arn:aws:s3:::my-bucket/prefix/ + // arn:aws:s3:::amzn-s3-demo-bucket/prefix/ // // Access point ARN: // @@ -864,7 +873,7 @@ type SMBFileShareInfo struct { // // Bucket ARN: // - // arn:aws:s3:::my-bucket/prefix/ + // arn:aws:s3:::amzn-s3-demo-bucket/prefix/ // // Access point ARN: //