diff --git a/.changelog/336e1255e74e44c297edd22a9d1b9205.json b/.changelog/336e1255e74e44c297edd22a9d1b9205.json new file mode 100644 index 00000000000..8e8f9a53cdc --- /dev/null +++ b/.changelog/336e1255e74e44c297edd22a9d1b9205.json @@ -0,0 +1,26 @@ +{ + "id": "336e1255-e74e-44c2-97ed-d22a9d1b9205", + "type": "feature", + "description": "Updated API client and endpoints to latest revision.", + "modules": [ + "service/chime", + "service/comprehend", + "service/ec2", + "service/ecr", + "service/iot", + "service/lexmodelsv2", + "service/lexruntimev2", + "service/macie2", + "service/mediapackagevod", + "service/networkfirewall", + "service/pinpoint", + "service/quicksight", + "service/rds", + "service/robomaker", + "service/s3", + "service/sagemaker", + "service/ssooidc", + "service/transcribe", + "service/wafv2" + ] +} \ No newline at end of file diff --git a/.changelog/41575353444b40ffbf474f4155544f00.json b/.changelog/41575353444b40ffbf474f4155544f00.json new file mode 100644 index 00000000000..e44c506c0fc --- /dev/null +++ b/.changelog/41575353444b40ffbf474f4155544f00.json @@ -0,0 +1,9 @@ +{ + "id": "41575353-444b-40ff-bf47-4f4155544f00", + "type": "release", + "description": "New AWS service client module", + "modules": [ + "service/kafkaconnect", + "service/transcribestreaming" + ] +} \ No newline at end of file diff --git a/.changelog/748836b8680843beb820250cd9f01b80.json b/.changelog/748836b8680843beb820250cd9f01b80.json new file mode 100644 index 00000000000..5a4b686fc58 --- /dev/null +++ b/.changelog/748836b8680843beb820250cd9f01b80.json @@ -0,0 +1,8 @@ +{ + "id": "748836b8-6808-43be-b820-250cd9f01b80", + "type": "documentation", + "description": "Updated API client documentation.", + "modules": [ + "service/cloudformation" + ] +} diff --git a/codegen/sdk-codegen/aws-models/chime.2018-05-01.json b/codegen/sdk-codegen/aws-models/chime.2018-05-01.json index 78f89d7e40d..675cdc61c94 100644 --- a/codegen/sdk-codegen/aws-models/chime.2018-05-01.json +++ b/codegen/sdk-codegen/aws-models/chime.2018-05-01.json @@ -4063,6 +4063,9 @@ "target": "com.amazonaws.chime#CreateSipMediaApplicationCallResponse" }, "errors": [ + { + "target": "com.amazonaws.chime#AccessDeniedException" + }, { "target": "com.amazonaws.chime#BadRequestException" }, @@ -4118,6 +4121,12 @@ "smithy.api#httpLabel": {}, "smithy.api#required": {} } + }, + "SipHeaders": { + "target": "com.amazonaws.chime#SipHeadersMap", + "traits": { + "smithy.api#documentation": "

The SIP headers added to an outbound call leg.

" + } } } }, @@ -13031,18 +13040,18 @@ "Routes": { "target": "com.amazonaws.chime#OriginationRouteList", "traits": { - "smithy.api#documentation": "

The call distribution properties defined for your SIP hosts. Valid range: Minimum value of 1.\n Maximum value of 20.

" + "smithy.api#documentation": "

The call distribution properties defined for your SIP hosts. Valid range: Minimum value of 1.\n Maximum value of 20. This parameter is not required, but you must specify either this parameter or Disabled.

" } }, "Disabled": { "target": "com.amazonaws.chime#Boolean", "traits": { - "smithy.api#documentation": "

When origination settings are disabled, inbound calls are not enabled for your Amazon Chime\n Voice Connector.

" + "smithy.api#documentation": "

When origination settings are disabled, inbound calls are not enabled for your Amazon Chime\n Voice Connector. This parameter is not required, but you must specify either this parameter or Routes.

" } } }, "traits": { - "smithy.api#documentation": "

Origination settings enable your SIP hosts to receive inbound calls using your Amazon Chime\n Voice Connector.

" + "smithy.api#documentation": "

Origination settings enable your SIP hosts to receive inbound calls using your Amazon Chime\n Voice Connector.

\n \n

The parameters listed below are not required, but you must use at least one.

\n
" } }, "com.amazonaws.chime#OriginationRoute": { @@ -13080,7 +13089,7 @@ } }, "traits": { - "smithy.api#documentation": "

Origination routes define call distribution properties for your SIP hosts to receive inbound\n calls using your Amazon Chime Voice Connector. Limit: Ten origination routes for each\n Amazon Chime Voice Connector.

" + "smithy.api#documentation": "

Origination routes define call distribution properties for your SIP hosts to receive inbound\n calls using your Amazon Chime Voice Connector. Limit: Ten origination routes for each\n Amazon Chime Voice Connector.

\n \n

The parameters listed below are not required, but you must use at least one.

\n
" } }, "com.amazonaws.chime#OriginationRouteList": { @@ -15616,6 +15625,21 @@ } } }, + "com.amazonaws.chime#SipHeadersMap": { + "type": "map", + "key": { + "target": "com.amazonaws.chime#SensitiveString" + }, + "value": { + "target": "com.amazonaws.chime#SensitiveString" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 20 + } + } + }, "com.amazonaws.chime#SipMediaApplication": { "type": "structure", "members": { @@ -15906,7 +15930,7 @@ } ], "traits": { - "smithy.api#documentation": "

Start transcription for the specified meetingId.

", + "smithy.api#documentation": "

Starts transcription for the specified meetingId.

", "smithy.api#http": { "method": "POST", "uri": "/meetings/{MeetingId}/transcription?operation=start", @@ -16666,7 +16690,7 @@ "EngineTranscribeMedicalSettings": { "target": "com.amazonaws.chime#EngineTranscribeMedicalSettings", "traits": { - "smithy.api#documentation": "

The transcription configuration settings passed to Amazon Transcribe.

" + "smithy.api#documentation": "

The transcription configuration settings passed to Amazon Transcribe Medical.

" } } }, diff --git a/codegen/sdk-codegen/aws-models/cloudformation.2010-05-15.json b/codegen/sdk-codegen/aws-models/cloudformation.2010-05-15.json index f2a74ba004d..5bc1a623ae0 100644 --- a/codegen/sdk-codegen/aws-models/cloudformation.2010-05-15.json +++ b/codegen/sdk-codegen/aws-models/cloudformation.2010-05-15.json @@ -58,7 +58,7 @@ } }, "traits": { - "smithy.api#documentation": "

Structure that contains the results of the account gate function which\n CloudFormation invokes, if present, before proceeding with a stack set operation in an\n account and Region.

\n

For each account and Region, CloudFormation lets you specify a Lamdba function\n that encapsulates any requirements that must be met before CloudFormation can proceed with\n a stack set operation in that account and Region. CloudFormation invokes the function each\n time a stack set operation is requested for that account and Region; if the function\n returns FAILED, CloudFormation cancels the operation in that account and\n Region, and sets the stack set operation result status for that account and Region to\n FAILED.

\n

For more information, see Configuring a\n target account gate.

" + "smithy.api#documentation": "

Structure that contains the results of the account gate function which CloudFormation invokes, if present, before proceeding with a stack set operation in an\n account and Region.

\n

For each account and Region, CloudFormation lets you specify a Lambda\n function that encapsulates any requirements that must be met before CloudFormation\n can proceed with a stack set operation in that account and Region. CloudFormation\n invokes the function each time a stack set operation is requested for that account and\n Region; if the function returns FAILED, CloudFormation cancels the\n operation in that account and Region, and sets the stack set operation result status for\n that account and Region to FAILED.

\n

For more information, see Configuring a\n target account gate.

" } }, "com.amazonaws.cloudformation#AccountGateStatus": { @@ -2555,7 +2555,7 @@ "target": "com.amazonaws.cloudformation#DescribeStackResourceDriftsOutput" }, "traits": { - "smithy.api#documentation": "

Returns drift information for the resources that have been checked for drift in the\n specified stack. This includes actual and expected configuration values for resources where\n CloudFormation detects configuration drift.

\n

For a given stack, there will be one StackResourceDrift for each stack\n resource that has been checked for drift. Resources that have not yet been checked for\n drift are not included. Resources that do not currently support drift detection are not\n checked, and so not included. For a list of resources that support drift detection, see\n Resources that Support Drift Detection.

\n

Use DetectStackResourceDrift to detect drift on individual\n resources, or DetectStackDrift to detect drift on all supported resources\n for a given stack.

", + "smithy.api#documentation": "

Returns drift information for the resources that have been checked for drift in the\n specified stack. This includes actual and expected configuration values for resources where\n CloudFormation detects configuration drift.

\n

For a given stack, there will be one StackResourceDrift for each stack\n resource that has been checked for drift. Resources that haven't yet been checked for drift\n are not included. Resources that do not currently support drift detection are not checked,\n and so are not included. For a list of resources that support drift detection, see Resources that Support Drift Detection.

\n

Use DetectStackResourceDrift to detect drift on individual\n resources, or DetectStackDrift to detect drift on all supported resources\n for a given stack.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -2810,7 +2810,7 @@ "target": "com.amazonaws.cloudformation#DescribeStacksOutput" }, "traits": { - "smithy.api#documentation": "

Returns the description for the specified stack; if no stack name was specified, then\n it returns the description for all the stacks created.

\n \n

If the stack does not exist, an AmazonCloudFormationException is\n returned.

\n
", + "smithy.api#documentation": "

Returns the description for the specified stack; if no stack name was specified, then\n it returns the description for all the stacks created.

\n \n

If the stack does not exist, a ValidationError is\n returned.

\n
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -4158,7 +4158,7 @@ "Capabilities": { "target": "com.amazonaws.cloudformation#Capabilities", "traits": { - "smithy.api#documentation": "

The capabilities found within the template. If your template contains IAM resources,\n you must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when\n you use the CreateStack or UpdateStack actions with\n your template; otherwise, those actions return an InsufficientCapabilities error.

\n

For more information, see Acknowledging IAM Resources in CloudFormation Templates.

" + "smithy.api#documentation": "

The capabilities found within the template. If your template contains IAM resources,\n you must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value for this parameter when\n you use the CreateStack or UpdateStack actions with\n your template; otherwise, those actions return an InsufficientCapabilities error.

\n

For more information, see Acknowledging IAM Resources in CloudFormation Templates.

" } }, "CapabilitiesReason": { diff --git a/codegen/sdk-codegen/aws-models/comprehend.2017-11-27.json b/codegen/sdk-codegen/aws-models/comprehend.2017-11-27.json index 2477d3aefd1..eecb5fdabfa 100644 --- a/codegen/sdk-codegen/aws-models/comprehend.2017-11-27.json +++ b/codegen/sdk-codegen/aws-models/comprehend.2017-11-27.json @@ -45,7 +45,22 @@ "min": 1, "max": 63 }, - "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" + } + }, + "com.amazonaws.comprehend#AugmentedManifestsDocumentTypeFormat": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PLAIN_TEXT_DOCUMENT", + "name": "PLAIN_TEXT_DOCUMENT" + }, + { + "value": "SEMI_STRUCTURED_DOCUMENT", + "name": "SEMI_STRUCTURED_DOCUMENT" + } + ] } }, "com.amazonaws.comprehend#AugmentedManifestsListItem": { @@ -64,6 +79,24 @@ "smithy.api#documentation": "

The JSON attribute that contains the annotations for your training documents. The number\n of attribute names that you specify depends on whether your augmented manifest file is the\n output of a single labeling job or a chained labeling job.

\n

If your file is the output of a single labeling job, specify the LabelAttributeName key\n that was used when the job was created in Ground Truth.

\n

If your file is the output of a chained labeling job, specify the LabelAttributeName key\n for one or more jobs in the chain. Each LabelAttributeName key provides the annotations from\n an individual job.

", "smithy.api#required": {} } + }, + "AnnotationDataS3Uri": { + "target": "com.amazonaws.comprehend#S3Uri", + "traits": { + "smithy.api#documentation": "

The S3 prefix to the annotation files that are referred to in the augmented manifest file.

" + } + }, + "SourceDocumentsS3Uri": { + "target": "com.amazonaws.comprehend#S3Uri", + "traits": { + "smithy.api#documentation": "

The S3 prefix to the source files (PDFs) that are referred to in the augmented manifest file.

" + } + }, + "DocumentType": { + "target": "com.amazonaws.comprehend#AugmentedManifestsDocumentTypeFormat", + "traits": { + "smithy.api#documentation": "

The type of augmented manifest, either PlainTextDocument or SemiStructuredDocument. If you don't specify a value, the default is PlainTextDocument.

\n " + } } }, "traits": { @@ -732,7 +765,7 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": "arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:[a-zA-Z0-9-]{1,64}/[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "smithy.api#pattern": "^arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:[a-zA-Z0-9-]{1,64}/[a-zA-Z0-9](-*[a-zA-Z0-9])*$" } }, "com.amazonaws.comprehend#ComprehendArnName": { @@ -752,7 +785,7 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": "arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:(document-classifier-endpoint|entity-recognizer-endpoint)/[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "smithy.api#pattern": "^arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:(document-classifier-endpoint|entity-recognizer-endpoint)/[a-zA-Z0-9](-*[a-zA-Z0-9])*$" } }, "com.amazonaws.comprehend#ComprehendEndpointName": { @@ -772,7 +805,7 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": "arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:(document-classifier|entity-recognizer)/[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "smithy.api#pattern": "^arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:(document-classifier|entity-recognizer)/[a-zA-Z0-9](-*[a-zA-Z0-9])*$" } }, "com.amazonaws.comprehend#Comprehend_20171127": { @@ -2581,7 +2614,7 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": "arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:document-classifier/[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "smithy.api#pattern": "^arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:document-classifier/[a-zA-Z0-9](-*[a-zA-Z0-9])*(/version/[a-zA-Z0-9](-*[a-zA-Z0-9])*)?$" } }, "com.amazonaws.comprehend#DocumentClassifierAugmentedManifestsList": { @@ -2612,7 +2645,7 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": "arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:document-classifier-endpoint/[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "smithy.api#pattern": "^arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:document-classifier-endpoint/[a-zA-Z0-9](-*[a-zA-Z0-9])*$" } }, "com.amazonaws.comprehend#DocumentClassifierFilter": { @@ -2838,6 +2871,79 @@ "smithy.api#documentation": "

Specifies one of the label or labels that categorize the document being analyzed.

" } }, + "com.amazonaws.comprehend#DocumentReadAction": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "TEXTRACT_DETECT_DOCUMENT_TEXT", + "name": "TEXTRACT_DETECT_DOCUMENT_TEXT" + }, + { + "value": "TEXTRACT_ANALYZE_DOCUMENT", + "name": "TEXTRACT_ANALYZE_DOCUMENT" + } + ] + } + }, + "com.amazonaws.comprehend#DocumentReadFeatureTypes": { + "type": "string", + "traits": { + "smithy.api#documentation": "

A list of the types of analyses to perform. This field specifies what feature types need to be extracted from the document where entity recognition is \n expected.

\n \n ", + "smithy.api#enum": [ + { + "value": "TABLES", + "name": "TABLES" + }, + { + "value": "FORMS", + "name": "FORMS" + } + ] + } + }, + "com.amazonaws.comprehend#DocumentReadMode": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SERVICE_DEFAULT", + "name": "SERVICE_DEFAULT" + }, + { + "value": "FORCE_DOCUMENT_READ_ACTION", + "name": "FORCE_DOCUMENT_READ_ACTION" + } + ] + } + }, + "com.amazonaws.comprehend#DocumentReaderConfig": { + "type": "structure", + "members": { + "DocumentReadAction": { + "target": "com.amazonaws.comprehend#DocumentReadAction", + "traits": { + "smithy.api#documentation": "

This enum field currently has two values, which apply to PDFs:

\n ", + "smithy.api#required": {} + } + }, + "DocumentReadMode": { + "target": "com.amazonaws.comprehend#DocumentReadMode", + "traits": { + "smithy.api#documentation": "

This enum field provides two values:

\n " + } + }, + "FeatureTypes": { + "target": "com.amazonaws.comprehend#ListOfDocumentReadFeatureTypes", + "traits": { + "smithy.api#documentation": "

Specifies how the text in an input file should be processed:

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides configuration parameters for how your inference documents are read.

" + } + }, "com.amazonaws.comprehend#DominantLanguage": { "type": "structure", "members": { @@ -3319,7 +3425,7 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": "arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:entity-recognizer/[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "smithy.api#pattern": "^arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:entity-recognizer/[a-zA-Z0-9](-*[a-zA-Z0-9])*(/version/[a-zA-Z0-9](-*[a-zA-Z0-9])*)?$" } }, "com.amazonaws.comprehend#EntityRecognizerAugmentedManifestsList": { @@ -3365,7 +3471,7 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": "arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:entity-recognizer-endpoint/[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "smithy.api#pattern": "^arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:entity-recognizer-endpoint/[a-zA-Z0-9](-*[a-zA-Z0-9])*$" } }, "com.amazonaws.comprehend#EntityRecognizerEntityList": { @@ -3750,7 +3856,7 @@ "min": 1, "max": 40 }, - "smithy.api#pattern": "[A-Z_]*" + "smithy.api#pattern": "^[A-Z_]*$" } }, "com.amazonaws.comprehend#EventsDetectionJobFilter": { @@ -3884,7 +3990,7 @@ "min": 20, "max": 2048 }, - "smithy.api#pattern": "arn:aws(-[^:]+)?:iam::[0-9]{12}:role/.+" + "smithy.api#pattern": "^arn:aws(-[^:]+)?:iam::[0-9]{12}:role/.+$" } }, "com.amazonaws.comprehend#InferenceUnitsInteger": { @@ -3911,10 +4017,16 @@ "traits": { "smithy.api#documentation": "

Specifies how the text in an input file should be processed:

\n " } + }, + "DocumentReaderConfig": { + "target": "com.amazonaws.comprehend#DocumentReaderConfig", + "traits": { + "smithy.api#documentation": "

The document reader config field applies only to the InputDataConfig of StartEntitiesDetectionJob.

\n

Use DocumentReaderConfig to provide specifications about how you want your inference documents read.\n Currently it applies to PDF documents in StartEntitiesDetectionJob custom inference.

" + } } }, "traits": { - "smithy.api#documentation": "

The input properties for a topic detection job.

" + "smithy.api#documentation": "

The input properties for an inference job.

" } }, "com.amazonaws.comprehend#InputFormat": { @@ -4203,7 +4315,8 @@ "smithy.api#length": { "min": 0, "max": 2048 - } + }, + "smithy.api#pattern": ".*" } }, "com.amazonaws.comprehend#KmsKeyValidationException": { @@ -4880,6 +4993,18 @@ "target": "com.amazonaws.comprehend#BatchDetectSyntaxItemResult" } }, + "com.amazonaws.comprehend#ListOfDocumentReadFeatureTypes": { + "type": "list", + "member": { + "target": "com.amazonaws.comprehend#DocumentReadFeatureTypes" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2 + } + } + }, "com.amazonaws.comprehend#ListOfDominantLanguages": { "type": "list", "member": { @@ -5195,7 +5320,7 @@ "min": 1, "max": 1 }, - "smithy.api#pattern": "[!@#$%&*]" + "smithy.api#pattern": "^[!@#$%&*]$" } }, "com.amazonaws.comprehend#MaxResultsInteger": { @@ -5764,7 +5889,7 @@ "min": 0, "max": 1024 }, - "smithy.api#pattern": "s3://[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9](/.*)?" + "smithy.api#pattern": "^s3://[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9](/.*)?$" } }, "com.amazonaws.comprehend#SecurityGroupId": { @@ -5774,7 +5899,7 @@ "min": 1, "max": 32 }, - "smithy.api#pattern": "[-0-9a-zA-Z]+" + "smithy.api#pattern": "^[-0-9a-zA-Z]+$" } }, "com.amazonaws.comprehend#SecurityGroupIds": { @@ -7313,7 +7438,7 @@ "min": 1, "max": 32 }, - "smithy.api#pattern": "[-0-9a-zA-Z]+" + "smithy.api#pattern": "^[-0-9a-zA-Z]+$" } }, "com.amazonaws.comprehend#Subnets": { diff --git a/codegen/sdk-codegen/aws-models/ec2.2016-11-15.json b/codegen/sdk-codegen/aws-models/ec2.2016-11-15.json index 211bf8a6ddd..5fc35a63cc2 100644 --- a/codegen/sdk-codegen/aws-models/ec2.2016-11-15.json +++ b/codegen/sdk-codegen/aws-models/ec2.2016-11-15.json @@ -3045,6 +3045,10 @@ { "value": "arm64", "name": "arm64" + }, + { + "value": "x86_64_mac", + "name": "x86_64_mac" } ] } @@ -8409,7 +8413,7 @@ "target": "com.amazonaws.ec2#CopyImageResult" }, "traits": { - "smithy.api#documentation": "

Initiates the copy of an AMI. You can copy an AMI from one Region to another, or from a\n Region to an AWS Outpost. You can't copy an AMI from an Outpost to a Region, from one Outpost\n to another, or within the same Outpost. To copy an AMI to another partition, see CreateStoreImageTask.

\n \t\n \t

To copy an AMI from one Region to another, specify the source Region using the \n \t\tSourceRegion parameter, and specify the \n \t\tdestination Region using its endpoint. Copies of encrypted backing snapshots for\n \t\tthe AMI are encrypted. Copies of unencrypted backing snapshots remain unencrypted, \n \t\tunless you set Encrypted during the copy operation. You cannot \n \t\tcreate an unencrypted copy of an encrypted backing snapshot.

\n \t\n \t

To copy an AMI from a Region to an Outpost, specify the source Region using the \n \t\tSourceRegion parameter, and specify the \n \t\tARN of the destination Outpost using DestinationOutpostArn. \n \t\tBacking snapshots copied to an Outpost are encrypted by default using the default\n \t\tencryption key for the Region, or a different key that you specify in the request using \n \t\tKmsKeyId. Outposts do not support unencrypted \n \t\tsnapshots. For more information, \n \t\t\tAmazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.

\n \n

For more information about the prerequisites and limits when copying an AMI, see Copying an AMI\n in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

Initiates the copy of an AMI. You can copy an AMI from one Region to another, or from a\n Region to an Outpost. You can't copy an AMI from an Outpost to a Region, from one Outpost\n to another, or within the same Outpost. To copy an AMI to another partition, see CreateStoreImageTask.

\n \t\n \t

To copy an AMI from one Region to another, specify the source Region using the \n \t\tSourceRegion parameter, and specify the \n \t\tdestination Region using its endpoint. Copies of encrypted backing snapshots for\n \t\tthe AMI are encrypted. Copies of unencrypted backing snapshots remain unencrypted, \n \t\tunless you set Encrypted during the copy operation. You cannot \n \t\tcreate an unencrypted copy of an encrypted backing snapshot.

\n \t\n \t

To copy an AMI from a Region to an Outpost, specify the source Region using the \n \t\tSourceRegion parameter, and specify the \n \t\tARN of the destination Outpost using DestinationOutpostArn. \n \t\tBacking snapshots copied to an Outpost are encrypted by default using the default\n \t\tencryption key for the Region, or a different key that you specify in the request using \n \t\tKmsKeyId. Outposts do not support unencrypted \n \t\tsnapshots. For more information, \n \t\t\tAmazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.

\n \n

For more information about the prerequisites and limits when copying an AMI, see Copying an AMI\n in the Amazon Elastic Compute Cloud User Guide.

" } }, "com.amazonaws.ec2#CopyImageRequest": { @@ -8431,7 +8435,7 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "Encrypted", - "smithy.api#documentation": "

Specifies whether the destination snapshots of the copied image should be encrypted.\n You can encrypt a copy of an unencrypted snapshot, but you cannot create an unencrypted\n copy of an encrypted snapshot. The default CMK for EBS is used unless you specify a non-default \n AWS Key Management Service (AWS KMS) CMK using KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

Specifies whether the destination snapshots of the copied image should be encrypted.\n You can encrypt a copy of an unencrypted snapshot, but you cannot create an unencrypted\n copy of an encrypted snapshot. The default KMS key for Amazon EBS is used unless you specify a non-default \n Key Management Service (KMS) key using KmsKeyId. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.

", "smithy.api#xmlName": "encrypted" } }, @@ -8439,7 +8443,7 @@ "target": "com.amazonaws.ec2#KmsKeyId", "traits": { "aws.protocols#ec2QueryName": "KmsKeyId", - "smithy.api#documentation": "

The identifier of the symmetric AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating\n encrypted volumes. If this parameter is not specified, your AWS managed CMK for EBS is used. \n If you specify a CMK, you must also set the encrypted state to true.

\n \t

You can specify a CMK using any of the following:

\n \t \n

AWS authenticates the CMK asynchronously. Therefore, if you specify an identifier that is not valid,\n the action can appear to complete, but eventually fails.

\n

The specified CMK must exist in the destination Region.

\n

Amazon EBS does not support asymmetric CMKs.

", + "smithy.api#documentation": "

The identifier of the symmetric Key Management Service (KMS) key to use when creating\n \t\tencrypted volumes. If this parameter is not specified, your Amazon Web Services managed KMS key for Amazon EBS is used. \n \t\tIf you specify a KMS key, you must also set the encrypted state to true.

\n \t

You can specify a KMS key using any of the following:

\n \t \n \t

Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an identifier that is not valid,\n the action can appear to complete, but eventually fails.

\n \t

The specified KMS key must exist in the destination Region.

\n \t

Amazon EBS does not support asymmetric KMS keys.

", "smithy.api#xmlName": "kmsKeyId" } }, @@ -8467,7 +8471,7 @@ "DestinationOutpostArn": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Outpost to which to copy the AMI. Only \n \t\tspecify this parameter when copying an AMI from an AWS Region to an Outpost. \n \t\tThe AMI must be in the Region of the destination Outpost. You cannot copy an \n \t\tAMI from an Outpost to a Region, from one Outpost to another, or within the same \n \t\tOutpost.

\n \t\n \t

For more information, see \n \t\tCopying AMIs from an AWS Region to an Outpost in the \n \t\tAmazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Outpost to which to copy the AMI. Only \n \t\tspecify this parameter when copying an AMI from an Amazon Web Services Region to an Outpost. \n \t\tThe AMI must be in the Region of the destination Outpost. You cannot copy an \n \t\tAMI from an Outpost to a Region, from one Outpost to another, or within the same \n \t\tOutpost.

\n \t\n \t

For more information, see \n \t\tCopying AMIs from an Amazon Web Services Region to an Outpost in the \n \t\tAmazon Elastic Compute Cloud User Guide.

" } }, "DryRun": { @@ -9814,7 +9818,7 @@ "target": "com.amazonaws.ec2#CreateImageResult" }, "traits": { - "smithy.api#documentation": "

Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance \n \tthat is either running or stopped.

\n \n \n \n \t\n \t

If you customized your instance with instance store volumes or EBS volumes in addition to the root device volume, the \n \tnew AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, \n \tthe instance automatically launches with those additional volumes.

\n

For more information, see Creating Amazon EBS-Backed Linux AMIs \n\t\t\t\tin the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance \n \tthat is either running or stopped.

\n \n \n \n \t\n \t

If you customized your instance with instance store volumes or Amazon EBS volumes in addition to the root device volume, the \n \tnew AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, \n \tthe instance automatically launches with those additional volumes.

\n \t

For more information, see Creating Amazon EBS-Backed Linux AMIs \n\t\t\t\tin the Amazon Elastic Compute Cloud User Guide.

" } }, "com.amazonaws.ec2#CreateImageRequest": { @@ -9866,14 +9870,14 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "NoReboot", - "smithy.api#documentation": "

By default, Amazon EC2 attempts to shut down and reboot the instance before creating the image. If the No Reboot option is set, Amazon EC2 doesn't shut down the instance before creating the image. When this option is used, file system integrity on the created image can't be guaranteed.

", + "smithy.api#documentation": "

By default, Amazon EC2 attempts to shut down and reboot the instance before creating the image. \n If the No Reboot option is set, Amazon EC2 doesn't shut down the instance before creating \n the image. Without a reboot, the AMI will be crash consistent (all the volumes are snapshotted \n at the same time), but not application consistent (the operating system buffers are not flushed \n to disk before the snapshots are created).

", "smithy.api#xmlName": "noReboot" } }, "TagSpecifications": { "target": "com.amazonaws.ec2#TagSpecificationList", "traits": { - "smithy.api#documentation": "

The tags to apply to the AMI and snapshots on creation. You can tag the AMI, the\n snapshots, or both.

\n \n

If you specify other values for ResourceType, the request fails.

\n

To tag an AMI or snapshot after it has been created, see CreateTags.

", + "smithy.api#documentation": "

The tags to apply to the AMI and snapshots on creation. You can tag the AMI, the\n snapshots, or both.

\n \n

If you specify other values for ResourceType, the request fails.

\n

To tag an AMI or snapshot after it has been created, see CreateTags.

", "smithy.api#xmlName": "TagSpecification" } } @@ -10397,7 +10401,7 @@ "target": "com.amazonaws.ec2#CreateManagedPrefixListResult" }, "traits": { - "smithy.api#documentation": "

Creates a managed prefix list. You can specify one or more entries for the prefix list. Each entry consists of a CIDR block and an optional description.

\n

You must specify the maximum number of entries for the prefix list. The maximum number of entries cannot be changed later.

" + "smithy.api#documentation": "

Creates a managed prefix list. You can specify one or more entries for the prefix list. \n Each entry consists of a CIDR block and an optional description.

" } }, "com.amazonaws.ec2#CreateManagedPrefixListRequest": { @@ -11225,7 +11229,7 @@ "target": "com.amazonaws.ec2#CreateRestoreImageTaskResult" }, "traits": { - "smithy.api#documentation": "

Starts a task that restores an AMI from an S3 object that was previously created by using\n CreateStoreImageTask.

\n

To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the\n Amazon Elastic Compute Cloud User Guide.

\n

For more information, see Store and restore an AMI using\n S3 in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

Starts a task that restores an AMI from an Amazon S3 object that was previously created by using\n CreateStoreImageTask.

\n

To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon Elastic Compute Cloud User Guide.

\n

For more information, see Store and restore an AMI using\n \tAmazon S3 in the Amazon Elastic Compute Cloud User Guide.

" } }, "com.amazonaws.ec2#CreateRestoreImageTaskRequest": { @@ -11234,7 +11238,7 @@ "Bucket": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The name of the S3 bucket that contains the stored AMI object.

", + "smithy.api#documentation": "

The name of the Amazon S3 bucket that contains the stored AMI object.

", "smithy.api#required": {} } }, @@ -11737,7 +11741,7 @@ "target": "com.amazonaws.ec2#CreateStoreImageTaskResult" }, "traits": { - "smithy.api#documentation": "

Stores an AMI as a single object in an S3 bucket.

\n

To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the\n Amazon Elastic Compute Cloud User Guide.

\n

For more information, see Store and restore an AMI using\n S3 in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

Stores an AMI as a single object in an Amazon S3 bucket.

\n

To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon Elastic Compute Cloud User Guide.

\n

For more information, see Store and restore an AMI using\n \tAmazon S3 in the Amazon Elastic Compute Cloud User Guide.

" } }, "com.amazonaws.ec2#CreateStoreImageTaskRequest": { @@ -11753,14 +11757,14 @@ "Bucket": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The name of the S3 bucket in which the AMI object will be stored. The bucket must be in\n the Region in which the request is being made. The AMI object appears in the bucket only after\n the upload task has completed.

", + "smithy.api#documentation": "

The name of the Amazon S3 bucket in which the AMI object will be stored. The bucket must be in\n the Region in which the request is being made. The AMI object appears in the bucket only after\n the upload task has completed.

", "smithy.api#required": {} } }, "S3ObjectTags": { "target": "com.amazonaws.ec2#S3ObjectTagList", "traits": { - "smithy.api#documentation": "

The tags to apply to the AMI object that will be stored in the S3 bucket.

", + "smithy.api#documentation": "

The tags to apply to the AMI object that will be stored in the Amazon S3 bucket.

", "smithy.api#xmlName": "S3ObjectTag" } }, @@ -13481,6 +13485,13 @@ "com.amazonaws.ec2#CreateVpcRequest": { "type": "structure", "members": { + "CidrBlock": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "

The IPv4 network range for the VPC, in CIDR notation. For example,\n\t\t 10.0.0.0/16. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

", + "smithy.api#required": {} + } + }, "AmazonProvidedIpv6CidrBlock": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -13529,13 +13540,6 @@ "smithy.api#documentation": "

The tags to assign to the VPC.

", "smithy.api#xmlName": "TagSpecification" } - }, - "CidrBlock": { - "target": "com.amazonaws.ec2#String", - "traits": { - "smithy.api#documentation": "

The IPv4 network range for the VPC, in CIDR notation. For example,\n\t\t 10.0.0.0/16. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18, we modify it to 100.68.0.0/18.

", - "smithy.api#required": {} - } } } }, @@ -16638,7 +16642,7 @@ "target": "com.amazonaws.ec2#DeregisterImageRequest" }, "traits": { - "smithy.api#documentation": "

Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch\n\t\t\tnew instances; however, it doesn't affect any instances that you've already launched\n\t\t\tfrom the AMI. You'll continue to incur usage costs for those instances until you\n\t\t\tterminate them.

\n

When you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was\n\t\t\tcreated for the root volume of the instance during the AMI creation process. When you\n\t\t\tderegister an instance store-backed AMI, it doesn't affect the files that you uploaded\n\t\t\tto Amazon S3 when you created the AMI.

" + "smithy.api#documentation": "

Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch\n\t\t\tnew instances; however, it doesn't affect any instances that you've already launched\n\t\t\tfrom the AMI. You'll continue to incur usage costs for those instances until you\n\t\t\tterminate them.

\n \t

When you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was\n\t\t\tcreated for the root volume of the instance during the AMI creation process. When you\n\t\t\tderegister an instance store-backed AMI, it doesn't affect the files that you uploaded\n\t\t\tto Amazon S3 when you created the AMI.

" } }, "com.amazonaws.ec2#DeregisterImageRequest": { @@ -19879,7 +19883,7 @@ "target": "com.amazonaws.ec2#DescribeImagesResult" }, "traits": { - "smithy.api#documentation": "

Describes the specified images (AMIs, AKIs, and ARIs) available to you or all of the images available to you.

\n

The images available to you include public images, private images that you own, and private images owned by other AWS accounts for which you have explicit launch permissions.

\n

Recently deregistered images appear in the returned results for a short interval and then\n return empty results. After all instances that reference a deregistered AMI are terminated,\n specifying the ID of the image will eventually return an error indicating that the AMI ID\n cannot be found.

", + "smithy.api#documentation": "

Describes the specified images (AMIs, AKIs, and ARIs) available to you or all of the images available to you.

\n

The images available to you include public images, private images that you own, and private images owned by other \n Amazon Web Services accounts for which you have explicit launch permissions.

\n

Recently deregistered images appear in the returned results for a short interval and then\n return empty results. After all instances that reference a deregistered AMI are terminated,\n specifying the ID of the image will eventually return an error indicating that the AMI ID\n cannot be found.

", "smithy.api#suppress": [ "WaitableTraitInvalidErrorType" ], @@ -19939,14 +19943,14 @@ "ExecutableUsers": { "target": "com.amazonaws.ec2#ExecutableByStringList", "traits": { - "smithy.api#documentation": "

Scopes the images by users with explicit launch permissions. \n\t\t\t\tSpecify an AWS account ID, self (the sender of the request),\n\t\t\t\tor all (public AMIs).

", + "smithy.api#documentation": "

Scopes the images by users with explicit launch permissions. \n Specify an Amazon Web Services account ID, self (the sender of the request),\n\t\t\t\tor all (public AMIs).

", "smithy.api#xmlName": "ExecutableBy" } }, "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

The filters.

\n ", + "smithy.api#documentation": "

The filters.

\n ", "smithy.api#xmlName": "Filter" } }, @@ -19960,7 +19964,7 @@ "Owners": { "target": "com.amazonaws.ec2#OwnerStringList", "traits": { - "smithy.api#documentation": "

Scopes the results to images with the specified owners. You can specify a combination of \n AWS account IDs, self, amazon, and aws-marketplace. \n If you omit this parameter, the results include all images for which you have launch permissions, \n regardless of ownership.

", + "smithy.api#documentation": "

Scopes the results to images with the specified owners. You can specify a combination of \n Amazon Web Services account IDs, self, amazon, and aws-marketplace. \n If you omit this parameter, the results include all images for which you have launch permissions, \n regardless of ownership.

", "smithy.api#xmlName": "Owner" } }, @@ -20627,7 +20631,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

One or more filters. Filter names and values are case-sensitive.

\n ", + "smithy.api#documentation": "

One or more filters. Filter names and values are case-sensitive.

\n ", "smithy.api#xmlName": "Filter" } }, @@ -24872,7 +24876,7 @@ "target": "com.amazonaws.ec2#DescribeStoreImageTasksResult" }, "traits": { - "smithy.api#documentation": "

Describes the progress of the AMI store tasks. You can describe the store tasks for\n specified AMIs. If you don't specify the AMIs, you get a paginated list of store tasks from\n the last 31 days.

\n

For each AMI task, the response indicates if the task is InProgress,\n Completed, or Failed. For tasks InProgress, the\n response shows the estimated progress as a percentage.

\n

Tasks are listed in reverse chronological order. Currently, only tasks from the past 31\n days can be viewed.

\n

To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the\n Amazon Elastic Compute Cloud User Guide.

\n

For more information, see Store and restore an AMI using\n S3 in the Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

Describes the progress of the AMI store tasks. You can describe the store tasks for\n specified AMIs. If you don't specify the AMIs, you get a paginated list of store tasks from\n the last 31 days.

\n

For each AMI task, the response indicates if the task is InProgress,\n Completed, or Failed. For tasks InProgress, the\n response shows the estimated progress as a percentage.

\n

Tasks are listed in reverse chronological order. Currently, only tasks from the past 31\n days can be viewed.

\n

To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon Elastic Compute Cloud User Guide.

\n

For more information, see Store and restore an AMI using\n \tAmazon S3 in the Amazon Elastic Compute Cloud User Guide.

", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -29300,7 +29304,7 @@ } }, "SnapshotId": { - "target": "com.amazonaws.ec2#String", + "target": "com.amazonaws.ec2#SnapshotId", "traits": { "aws.protocols#ec2QueryName": "SnapshotId", "smithy.api#documentation": "

The ID of the snapshot.

", @@ -30699,7 +30703,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "EventSubType", - "smithy.api#documentation": "

The event.

\n\n

The following are the error events:

\n \n\n

The following are the fleetRequestChange events:

\n \n\n

The following are the instanceChange events:

\n \n\n

The following are the Information events:

\n ", + "smithy.api#documentation": "

The event.

\n\n

The following are the error events:

\n \n\n

The following are the fleetRequestChange events:

\n \n\n

The following are the instanceChange events:

\n \n\n

The following are the Information events:

\n ", "smithy.api#xmlName": "eventSubType" } }, @@ -35933,7 +35937,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "ImageOwnerId", - "smithy.api#documentation": "

The AWS account ID of the image owner.

", + "smithy.api#documentation": "

The ID of the Amazon Web Services account that owns the image.

", "smithy.api#xmlName": "imageOwnerId" } }, @@ -35949,7 +35953,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "PlatformDetails", - "smithy.api#documentation": "

The platform details associated with the billing code of the AMI. For more information,\n see Obtaining\n Billing Information in the Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

The platform details associated with the billing code of the AMI. For more information,\n see Understanding \n AMI billing in the Amazon Elastic Compute Cloud User Guide.

", "smithy.api#xmlName": "platformDetails" } }, @@ -35957,7 +35961,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "UsageOperation", - "smithy.api#documentation": "

The operation of the Amazon EC2 instance and the billing code that is associated with the AMI.\n usageOperation corresponds to the lineitem/Operation column on your AWS Cost and Usage Report and in the AWS Price\n List API. For the list of UsageOperation codes, see Platform Details and Usage Operation Billing Codes in the\n Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

The operation of the Amazon EC2 instance and the billing code that is associated with the AMI.\n usageOperation corresponds to the lineitem/Operation column on your Amazon Web Services Cost and Usage Report and in the Amazon Web Services Price\n \tList API. You can view these fields on the Instances or \n \tAMIs pages in the Amazon EC2 console, or in the responses that are \n \treturned by the DescribeImages \n \tcommand in the Amazon EC2 API, or the describe-images \n \tcommand in the CLI.

", "smithy.api#xmlName": "usageOperation" } }, @@ -36021,7 +36025,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "ImageOwnerAlias", - "smithy.api#documentation": "

The AWS account alias (for example, amazon, self) or\n the AWS account ID of the AMI owner.

", + "smithy.api#documentation": "

The Amazon Web Services account alias (for example, amazon, self) or\n the Amazon Web Services account ID of the AMI owner.

", "smithy.api#xmlName": "imageOwnerAlias" } }, @@ -36045,7 +36049,7 @@ "target": "com.amazonaws.ec2#DeviceType", "traits": { "aws.protocols#ec2QueryName": "RootDeviceType", - "smithy.api#documentation": "

The type of root device used by the AMI. The AMI can use an EBS volume or an instance store volume.

", + "smithy.api#documentation": "

The type of root device used by the AMI. The AMI can use an Amazon EBS volume or an instance store volume.

", "smithy.api#xmlName": "rootDeviceType" } }, @@ -38406,7 +38410,7 @@ "CpuCredits": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The credit option for CPU usage of the instance. Valid values are\n standard and unlimited.

" + "smithy.api#documentation": "

The credit option for CPU usage of the instance. Valid values are\n standard and unlimited.

\n

T3 instances with host tenancy do not support the unlimited \n CPU credit option.

" } } }, @@ -41703,6 +41707,18 @@ { "value": "x2gd.metal", "name": "x2gd_metal" + }, + { + "value": "vt1.3xlarge", + "name": "vt1_3xlarge" + }, + { + "value": "vt1.6xlarge", + "name": "vt1_6xlarge" + }, + { + "value": "vt1.24xlarge", + "name": "vt1_24xlarge" } ] } @@ -42828,7 +42844,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "UserId", - "smithy.api#documentation": "

The AWS account ID.

\n

Constraints: Up to 10 000 account IDs can be specified in a single request.

", + "smithy.api#documentation": "

The Amazon Web Services account ID.

\n

Constraints: Up to 10,000 account IDs can be specified in a single request.

", "smithy.api#xmlName": "userId" } } @@ -42852,13 +42868,13 @@ "Add": { "target": "com.amazonaws.ec2#LaunchPermissionList", "traits": { - "smithy.api#documentation": "

The AWS account ID to add to the list of launch permissions for the AMI.

" + "smithy.api#documentation": "

The Amazon Web Services account ID to add to the list of launch permissions for the AMI.

" } }, "Remove": { "target": "com.amazonaws.ec2#LaunchPermissionList", "traits": { - "smithy.api#documentation": "

The AWS account ID to remove from the list of launch permissions for the AMI.

" + "smithy.api#documentation": "

The Amazon Web Services account ID to remove from the list of launch permissions for the AMI.

" } } }, @@ -45586,7 +45602,7 @@ "target": "com.amazonaws.ec2#PrefixListState", "traits": { "aws.protocols#ec2QueryName": "State", - "smithy.api#documentation": "

The state of the prefix list.

", + "smithy.api#documentation": "

The current state of the prefix list.

", "smithy.api#xmlName": "state" } }, @@ -45904,7 +45920,7 @@ "InstanceCount": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

The number of instances for which to reserve capacity.

\n\t \t

Valid range: 1 - 1000

" + "smithy.api#documentation": "

The number of instances for which to reserve capacity. The number of instances can't be increased or \n\t\t \tdecreased by more than 1000 in a single request.

" } }, "EndDate": { @@ -46453,7 +46469,7 @@ "target": "com.amazonaws.ec2#ModifyImageAttributeRequest" }, "traits": { - "smithy.api#documentation": "

Modifies the specified attribute of the specified AMI. You can specify only one attribute at a time.\n You can use the Attribute parameter to specify the attribute or one of the following parameters: \n Description, LaunchPermission, or ProductCode.

\n

AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace product code cannot be made public.

\n

To enable the SriovNetSupport enhanced networking attribute of an image, enable SriovNetSupport on an instance \n and create an AMI from the instance.

" + "smithy.api#documentation": "

Modifies the specified attribute of the specified AMI. You can specify only one attribute at a time.\n You can use the Attribute parameter to specify the attribute or one of the following parameters: \n Description or LaunchPermission.

\n \t

Images with an Amazon Web Services Marketplace product code cannot be made public.

\n

To enable the SriovNetSupport enhanced networking attribute of an image, enable SriovNetSupport on an instance \n and create an AMI from the instance.

" } }, "com.amazonaws.ec2#ModifyImageAttributeRequest": { @@ -46462,7 +46478,7 @@ "Attribute": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The name of the attribute to modify. \n The valid values are description, launchPermission, and productCodes.

" + "smithy.api#documentation": "

The name of the attribute to modify. \n The valid values are description and launchPermission.

" } }, "Description": { @@ -46493,7 +46509,7 @@ "ProductCodes": { "target": "com.amazonaws.ec2#ProductCodeStringList", "traits": { - "smithy.api#documentation": "

The DevPay product codes. After you add a product code to an AMI, it can't be removed.

", + "smithy.api#documentation": "

Not supported.

", "smithy.api#xmlName": "ProductCode" } }, @@ -46507,14 +46523,14 @@ "UserIds": { "target": "com.amazonaws.ec2#UserIdStringList", "traits": { - "smithy.api#documentation": "

The AWS account IDs. \n This parameter can be used only when the Attribute parameter is launchPermission.

", + "smithy.api#documentation": "

The Amazon Web Services account IDs. \n This parameter can be used only when the Attribute parameter is launchPermission.

", "smithy.api#xmlName": "UserId" } }, "Value": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The value of the attribute being modified. \n This parameter can be used only when the Attribute parameter is description or productCodes.

" + "smithy.api#documentation": "

The value of the attribute being modified. \n This parameter can be used only when the Attribute parameter is description.

" } }, "DryRun": { @@ -47021,7 +47037,7 @@ "target": "com.amazonaws.ec2#HostTenancy", "traits": { "aws.protocols#ec2QueryName": "Tenancy", - "smithy.api#documentation": "

The tenancy for the instance.

", + "smithy.api#documentation": "

The tenancy for the instance.

\n \n

For T3 instances, you can't change the tenancy from dedicated to \n host, or from host to dedicated. Attempting \n to make one of these unsupported tenancy changes results in the InvalidTenancy \n error code.

", "smithy.api#xmlName": "tenancy" } }, @@ -47170,7 +47186,7 @@ "MaxEntries": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

The maximum number of entries for the prefix list. You cannot modify the entries \n of a prefix list and modify the size of a prefix list at the same time.

" + "smithy.api#documentation": "

The maximum number of entries for the prefix list. You cannot modify the entries \n of a prefix list and modify the size of a prefix list at the same time.

\n

If any of the resources that reference the prefix list cannot support the new\n maximum size, the modify operation fails. Check the state message for the IDs of \n the first ten resources that do not support the new maximum size.

" } } } @@ -51547,7 +51563,7 @@ "target": "com.amazonaws.ec2#Tenancy", "traits": { "aws.protocols#ec2QueryName": "Tenancy", - "smithy.api#documentation": "

The tenancy of the instance (if the instance is running in a VPC). An instance with a\n tenancy of dedicated runs on single-tenant hardware. The host\n tenancy is not supported for the ImportInstance command.

\n

This parameter is not supported by CreateFleet.

", + "smithy.api#documentation": "

The tenancy of the instance (if the instance is running in a VPC). An instance with a\n tenancy of dedicated runs on single-tenant hardware. The host\n tenancy is not supported for the ImportInstance command.

\n

This parameter is not supported by CreateFleet.

\n \n

T3 instances that use the unlimited CPU credit option do not support host tenancy.

", "smithy.api#xmlName": "tenancy" } }, @@ -51729,7 +51745,7 @@ "type": "structure", "members": { "GroupName": { - "target": "com.amazonaws.ec2#String", + "target": "com.amazonaws.ec2#PlacementGroupName", "traits": { "aws.protocols#ec2QueryName": "GroupName", "smithy.api#documentation": "

The name of the placement group that the instance is in.

", @@ -53384,7 +53400,7 @@ "target": "com.amazonaws.ec2#RegisterImageResult" }, "traits": { - "smithy.api#documentation": "

Registers an AMI. When you're creating an AMI, this is the final step you must complete\n before you can launch an instance from the AMI. For more information about creating AMIs, see\n Creating your\n own AMIs in the Amazon Elastic Compute Cloud User Guide.

\n \n

For Amazon EBS-backed instances, CreateImage creates and registers \n \tthe AMI in a single request, so you don't have to register the AMI yourself.

\n
\n\n

If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. \n If you make changes to an image, deregister the previous image and register the new image.

\n\n

\n Register a snapshot of a root device volume\n

\n

You can use RegisterImage to create an Amazon EBS-backed Linux AMI from\n a snapshot of a root device volume. You specify the snapshot using a block device mapping.\n You can't set the encryption state of the volume using the block device mapping. If the \n snapshot is encrypted, or encryption by default is enabled, the root volume of an instance \n launched from the AMI is encrypted.

\n

For more information, see Create a Linux AMI from a snapshot and Use encryption with EBS-backed AMIs\n in the Amazon Elastic Compute Cloud User Guide.

\n \n

\n AWS Marketplace product codes\n

\n

If any snapshots have AWS Marketplace product codes, they are copied to the new\n AMI.

\n

Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE\n Linux Enterprise Server (SLES), use the EC2 billing product code associated with an AMI to\n verify the subscription status for package updates. To create a new AMI for operating systems\n that require a billing product code, instead of registering the AMI, do the following to\n preserve the billing product code association:

\n
    \n
  1. \n

    Launch an instance from an existing AMI with that billing product code.

    \n
  2. \n
  3. \n

    Customize the instance.

    \n
  4. \n
  5. \n

    Create an AMI from the instance using CreateImage.

    \n
  6. \n
\n

If you purchase a Reserved Instance to apply to an On-Demand Instance that was launched\n from an AMI with a billing product code, make sure that the Reserved Instance has the matching\n billing product code. If you purchase a Reserved Instance without the matching billing product\n code, the Reserved Instance will not be applied to the On-Demand Instance. For information\n about how to obtain the platform details and billing information of an AMI, see Obtaining billing\n information in the Amazon Elastic Compute Cloud User Guide.

" + "smithy.api#documentation": "

Registers an AMI. When you're creating an AMI, this is the final step you must complete\n before you can launch an instance from the AMI. For more information about creating AMIs, see\n Creating your\n own AMIs in the Amazon Elastic Compute Cloud User Guide.

\n \n \t

For Amazon EBS-backed instances, CreateImage creates and registers \n \tthe AMI in a single request, so you don't have to register the AMI yourself.

\n
\n\n

If needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. \n If you make changes to an image, deregister the previous image and register the new image.

\n\n

\n Register a snapshot of a root device volume\n

\n \t

You can use RegisterImage to create an Amazon EBS-backed Linux AMI from\n a snapshot of a root device volume. You specify the snapshot using a block device mapping.\n You can't set the encryption state of the volume using the block device mapping. If the \n snapshot is encrypted, or encryption by default is enabled, the root volume of an instance \n launched from the AMI is encrypted.

\n

For more information, see Create a Linux AMI from a snapshot and Use encryption with Amazon EBS-backed AMIs\n in the Amazon Elastic Compute Cloud User Guide.

\n \n \t

\n Amazon Web Services Marketplace product codes\n

\n \t

If any snapshots have Amazon Web Services Marketplace product codes, they are copied to the new\n AMI.

\n

Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE\n Linux Enterprise Server (SLES), use the Amazon EC2 billing product code associated with an AMI to\n verify the subscription status for package updates. To create a new AMI for operating systems\n that require a billing product code, instead of registering the AMI, do the following to\n preserve the billing product code association:

\n
    \n
  1. Launch an instance from an existing AMI with that billing product code.
  2. Customize the instance.
  3. Create an AMI from the instance using CreateImage.
\n

If you purchase a Reserved Instance to apply to an On-Demand Instance that was launched\n from an AMI with a billing product code, make sure that the Reserved Instance has the matching\n billing product code. If you purchase a Reserved Instance without the matching billing product\n code, the Reserved Instance will not be applied to the On-Demand Instance. For information\n about how to obtain the platform details and billing information of an AMI, see Understanding AMI \n \tbilling in the Amazon Elastic Compute Cloud User Guide.
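As a hedged illustration (not part of this model), a short AWS SDK for Go v2 sketch of registering an EBS-backed Linux AMI from a root-volume snapshot via a block device mapping; the AMI name, device name, and snapshot ID are placeholders:

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

// registerImageFromSnapshot registers an Amazon EBS-backed AMI whose root
// volume is restored from an existing snapshot. The encryption state of the
// volume follows the snapshot (it cannot be set in the mapping itself).
func registerImageFromSnapshot(ctx context.Context, client *ec2.Client, snapshotID string) (string, error) {
	out, err := client.RegisterImage(ctx, &ec2.RegisterImageInput{
		Name:               aws.String("my-restored-ami"),
		Architecture:       types.ArchitectureValuesX8664,
		VirtualizationType: aws.String("hvm"),
		RootDeviceName:     aws.String("/dev/xvda"),
		BlockDeviceMappings: []types.BlockDeviceMapping{{
			DeviceName: aws.String("/dev/xvda"),
			Ebs: &types.EbsBlockDevice{
				SnapshotId:          aws.String(snapshotID),
				DeleteOnTermination: aws.Bool(true),
			},
		}},
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.ImageId), nil
}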

" } }, "com.amazonaws.ec2#RegisterImageRequest": { @@ -53400,14 +53416,14 @@ "target": "com.amazonaws.ec2#ArchitectureValues", "traits": { "aws.protocols#ec2QueryName": "Architecture", - "smithy.api#documentation": "

The architecture of the AMI.

\n

Default: For Amazon EBS-backed AMIs, i386.\n For instance store-backed AMIs, the architecture specified in the manifest file.

", + "smithy.api#documentation": "

The architecture of the AMI.

\n \t

Default: For Amazon EBS-backed AMIs, i386.\n For instance store-backed AMIs, the architecture specified in the manifest file.

", "smithy.api#xmlName": "architecture" } }, "BlockDeviceMappings": { "target": "com.amazonaws.ec2#BlockDeviceMappingRequestList", "traits": { - "smithy.api#documentation": "

The block device mapping entries.

\n

If you specify an EBS volume using the ID of an EBS snapshot, you can't specify the encryption state of the volume.

\n

If you create an AMI on an Outpost, then all backing snapshots must be on the same Outpost or in the Region \n \t of that Outpost. AMIs on an Outpost that include local snapshots can be used to launch instances on the same Outpost \n \t only. For more information, \n \t \tAmazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.

", + "smithy.api#documentation": "

The block device mapping entries.

\n \t

If you specify an Amazon EBS volume using the ID of an Amazon EBS snapshot, you can't specify the encryption state of the volume.

\n

If you create an AMI on an Outpost, then all backing snapshots must be on the same Outpost or in the Region \n \t of that Outpost. AMIs on an Outpost that include local snapshots can be used to launch instances on the same Outpost \n \t only. For more information, see \n \t \tAmazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.

", "smithy.api#xmlName": "BlockDeviceMapping" } }, @@ -53455,7 +53471,7 @@ "BillingProducts": { "target": "com.amazonaws.ec2#BillingProductList", "traits": { - "smithy.api#documentation": "

The billing product codes. Your account must be authorized to specify billing product codes. Otherwise,\n you can use the AWS Marketplace to bill for the use of an AMI.

", + "smithy.api#documentation": "

The billing product codes. Your account must be authorized to specify billing product codes. Otherwise,\n \tyou can use the Amazon Web Services Marketplace to bill for the use of an AMI.

", "smithy.api#xmlName": "BillingProduct" } }, @@ -56288,7 +56304,7 @@ "target": "com.amazonaws.ec2#ResetImageAttributeRequest" }, "traits": { - "smithy.api#documentation": "

Resets an attribute of an AMI to its default value.

\n \n

The productCodes attribute can't be reset.

\n
" + "smithy.api#documentation": "

Resets an attribute of an AMI to its default value.

" } }, "com.amazonaws.ec2#ResetImageAttributeName": { @@ -56478,6 +56494,10 @@ "type": "string", "traits": { "smithy.api#enum": [ + { + "value": "capacity-reservation", + "name": "capacity_reservation" + }, { "value": "client-vpn-endpoint", "name": "client_vpn_endpoint" @@ -56486,6 +56506,10 @@ "value": "customer-gateway", "name": "customer_gateway" }, + { + "value": "carrier-gateway", + "name": "carrier_gateway" + }, { "value": "dedicated-host", "name": "dedicated_host" @@ -56550,6 +56574,14 @@ "value": "internet-gateway", "name": "internet_gateway" }, + { + "value": "ipv4pool-ec2", + "name": "ipv4pool_ec2" + }, + { + "value": "ipv6pool-ec2", + "name": "ipv6pool_ec2" + }, { "value": "key-pair", "name": "key_pair" @@ -56558,10 +56590,30 @@ "value": "launch-template", "name": "launch_template" }, + { + "value": "local-gateway", + "name": "local_gateway" + }, + { + "value": "local-gateway-route-table", + "name": "local_gateway_route_table" + }, + { + "value": "local-gateway-virtual-interface", + "name": "local_gateway_virtual_interface" + }, + { + "value": "local-gateway-virtual-interface-group", + "name": "local_gateway_virtual_interface_group" + }, { "value": "local-gateway-route-table-vpc-association", "name": "local_gateway_route_table_vpc_association" }, + { + "value": "local-gateway-route-table-virtual-interface-group-association", + "name": "local_gateway_route_table_virtual_interface_group_association" + }, { "value": "natgateway", "name": "natgateway" @@ -56586,6 +56638,14 @@ "value": "placement-group", "name": "placement_group" }, + { + "value": "prefix-list", + "name": "prefix_list" + }, + { + "value": "replace-root-volume-task", + "name": "replace_root_volume_task" + }, { "value": "reserved-instances", "name": "reserved_instances" @@ -56658,6 +56718,14 @@ "value": "vpc", "name": "vpc" }, + { + "value": "vpc-endpoint", + "name": "vpc_endpoint" + }, + { + "value": "vpc-endpoint-service", + "name": "vpc_endpoint_service" + }, { "value": "vpc-peering-connection", "name": "vpc_peering_connection" @@ -58016,7 +58084,7 @@ "CreditSpecification": { "target": "com.amazonaws.ec2#CreditSpecificationRequest", "traits": { - "smithy.api#documentation": "

The credit option for CPU usage of the burstable performance instance. Valid values\n are standard and unlimited. To change this attribute after\n launch, use \n ModifyInstanceCreditSpecification. For more information, see Burstable\n performance instances in the Amazon EC2 User Guide.

\n

Default: standard (T2 instances) or unlimited (T3/T3a\n instances)

" + "smithy.api#documentation": "

The credit option for CPU usage of the burstable performance instance. Valid values\n are standard and unlimited. To change this attribute after\n launch, use \n ModifyInstanceCreditSpecification. For more information, see Burstable\n performance instances in the Amazon EC2 User Guide.

\n

Default: standard (T2 instances) or unlimited (T3/T3a\n instances)

\n \n

For T3 instances with host tenancy, only standard is \n supported.
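For illustration (assumed usage, not part of the model), a Go SDK sketch of switching a running T3 instance to the standard credit option, for example before moving it to host tenancy:

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

// useStandardCredits changes the CPU credit option of a running burstable
// instance; valid values are "standard" and "unlimited".
func useStandardCredits(ctx context.Context, client *ec2.Client, instanceID string) error {
	_, err := client.ModifyInstanceCreditSpecification(ctx, &ec2.ModifyInstanceCreditSpecificationInput{
		InstanceCreditSpecifications: []types.InstanceCreditSpecificationRequest{{
			InstanceId: aws.String(instanceID),
			CpuCredits: aws.String("standard"),
		}},
	})
	return err
}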

" } }, "CpuOptions": { @@ -58144,7 +58212,7 @@ } }, "traits": { - "smithy.api#documentation": "

The tags to apply to the AMI object that will be stored in the S3 bucket. For more\n information, see Categorizing your storage using\n tags in the Amazon Simple Storage Service User Guide.

" + "smithy.api#documentation": "

The tags to apply to the AMI object that will be stored in the Amazon S3 bucket. For more\n information, see Categorizing your storage using\n tags in the Amazon Simple Storage Service User Guide.

" } }, "com.amazonaws.ec2#S3ObjectTagList": { @@ -58162,7 +58230,7 @@ "AWSAccessKeyId": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

The access key ID of the owner of the bucket. Before you specify a value for your access key ID, review and follow the guidance \n in Best Practices for Managing AWS Access Keys.

" + "smithy.api#documentation": "

The access key ID of the owner of the bucket. Before you specify a value for your access key ID, review and follow the guidance \n in Best Practices for Managing Amazon Web Services Access Keys.

" } }, "Bucket": { @@ -58199,7 +58267,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the storage parameters for S3 and S3 buckets for an instance store-backed AMI.

" + "smithy.api#documentation": "

Describes the storage parameters for Amazon S3 buckets used for an instance store-backed AMI.

" } }, "com.amazonaws.ec2#ScheduledInstance": { @@ -60634,7 +60702,7 @@ } }, "ImageId": { - "target": "com.amazonaws.ec2#String", + "target": "com.amazonaws.ec2#ImageId", "traits": { "aws.protocols#ec2QueryName": "ImageId", "smithy.api#documentation": "

The ID of the AMI.

", @@ -60658,7 +60726,7 @@ } }, "KeyName": { - "target": "com.amazonaws.ec2#String", + "target": "com.amazonaws.ec2#KeyPairName", "traits": { "aws.protocols#ec2QueryName": "KeyName", "smithy.api#documentation": "

The name of the key pair.

", @@ -60706,7 +60774,7 @@ } }, "SubnetId": { - "target": "com.amazonaws.ec2#String", + "target": "com.amazonaws.ec2#SubnetId", "traits": { "aws.protocols#ec2QueryName": "SubnetId", "smithy.api#documentation": "

The IDs of the subnets in which to launch the instances. To specify multiple subnets, separate\n them using commas; for example, \"subnet-1234abcdeexample1, subnet-0987cdef6example2\".

", @@ -61569,7 +61637,7 @@ } }, "GroupName": { - "target": "com.amazonaws.ec2#String", + "target": "com.amazonaws.ec2#PlacementGroupName", "traits": { "aws.protocols#ec2QueryName": "GroupName", "smithy.api#documentation": "

The name of the placement group.

", @@ -61785,7 +61853,7 @@ "target": "com.amazonaws.ec2#StartInstancesResult" }, "traits": { - "smithy.api#documentation": "

Starts an Amazon EBS-backed instance that you've previously stopped.

\n

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and\n started. When an instance is stopped, the compute resources are released and you are not\n billed for instance usage. However, your root partition Amazon EBS volume remains and\n continues to persist your data, and you are charged for Amazon EBS volume usage. You can\n restart your instance at any time. Every time you start your instance, Amazon EC2\n charges a one-minute minimum for instance usage, and thereafter charges per second for\n instance usage.

\n

Before stopping an instance, make sure it is in a state from which it can be\n restarted. Stopping an instance does not preserve data stored in RAM.

\n

Performing this operation on an instance that uses an instance store as its root\n device returns an error.

\n

For more information, see Stopping instances in the\n Amazon EC2 User Guide.

" + "smithy.api#documentation": "

Starts an Amazon EBS-backed instance that you've previously stopped.

\n

Instances that use Amazon EBS volumes as their root devices can be quickly stopped and\n started. When an instance is stopped, the compute resources are released and you are not\n billed for instance usage. However, your root partition Amazon EBS volume remains and\n continues to persist your data, and you are charged for Amazon EBS volume usage. You can\n restart your instance at any time. Every time you start your instance, Amazon EC2\n charges a one-minute minimum for instance usage, and thereafter charges per second for\n instance usage.

\n

Before stopping an instance, make sure it is in a state from which it can be\n restarted. Stopping an instance does not preserve data stored in RAM.

\n

Performing this operation on an instance that uses an instance store as its root\n device returns an error.

\n \n

If you attempt to start a T3 instance with host tenancy and the unlimited \n CPU credit option, the request fails. The unlimited CPU credit option is not \n supported on Dedicated Hosts. Before you start the instance, either change its CPU credit \n option to standard, or change its tenancy to default or dedicated.

\n \n

For more information, see Stopping instances in the\n Amazon EC2 User Guide.
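A minimal Go SDK sketch of the call described above (illustrative only; the instance ID is a placeholder):

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

// startInstance starts a previously stopped Amazon EBS-backed instance.
func startInstance(ctx context.Context, client *ec2.Client, instanceID string) error {
	_, err := client.StartInstances(ctx, &ec2.StartInstancesInput{
		InstanceIds: []string{instanceID},
	})
	return err
}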

" } }, "com.amazonaws.ec2#StartInstancesRequest": { @@ -62187,7 +62255,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "Bucket", - "smithy.api#documentation": "

The name of the S3 bucket that contains the stored AMI object.

", + "smithy.api#documentation": "

The name of the Amazon S3 bucket that contains the stored AMI object.

", "smithy.api#xmlName": "bucket" } }, @@ -62818,7 +62886,7 @@ "target": "com.amazonaws.ec2#ResourceType", "traits": { "aws.protocols#ec2QueryName": "ResourceType", - "smithy.api#documentation": "

The type of resource to tag. Currently, the resource types that support tagging on\n creation are: capacity-reservation | carrier-gateway |\n client-vpn-endpoint | customer-gateway |\n \t dedicated-host | dhcp-options | egress-only-internet-gateway | elastic-ip | elastic-gpu |\n \t export-image-task\n | export-instance-task | fleet | fpga-image |\n \t host-reservation | image| import-image-task |\n \t import-snapshot-task | instance | instance-event-window |\n internet-gateway | ipv4pool-ec2 | ipv6pool-ec2 |\n \t key-pair | launch-template | local-gateway-route-table-vpc-association | placement-group |\n \t prefix-list | natgateway | network-acl | network-interface | \n \t reserved-instances |route-table | security-group| snapshot | spot-fleet-request\n | spot-instances-request | snapshot | subnet |\n traffic-mirror-filter | traffic-mirror-session |\n traffic-mirror-target | transit-gateway |\n \t transit-gateway-attachment | transit-gateway-multicast-domain | transit-gateway-route-table |\n volume |vpc | vpc-peering-connection |\n vpc-endpoint (for interface and gateway endpoints) |\n \t vpc-endpoint-service (for Amazon Web Services PrivateLink) | vpc-flow-log |\n vpn-connection | vpn-gateway.

\n

To tag a resource after it has been created, see CreateTags.

", + "smithy.api#documentation": "

The type of resource to tag on creation. The possible values are: \n \t capacity-reservation | carrier-gateway |\n client-vpn-endpoint | customer-gateway |\n \t dedicated-host | dhcp-options | \n \t egress-only-internet-gateway | elastic-gpu | \n \t elastic-ip | export-image-task |\n export-instance-task | fleet | fpga-image |\n \t host-reservation | image | import-image-task |\n \t import-snapshot-task | instance | instance-event-window |\n internet-gateway | ipv4pool-ec2 | ipv6pool-ec2 |\n \t key-pair | launch-template | local-gateway-route-table-vpc-association |\n \t natgateway | network-acl | network-insights-analysis | \n \t network-insights-path | network-interface | \n \t placement-group | prefix-list | reserved-instances | \n \t route-table | security-group | security-group-rule | \n \t snapshot | spot-fleet-request | spot-instances-request | subnet | \n traffic-mirror-filter | traffic-mirror-session | traffic-mirror-target | \n \t transit-gateway | transit-gateway-attachment | \n \t transit-gateway-multicast-domain | transit-gateway-route-table |\n volume | vpc | vpc-endpoint | vpc-endpoint-service | \n \t vpc-flow-log | vpc-peering-connection |\n \t vpn-connection | vpn-gateway.

\n

To tag a resource after it has been created, see CreateTags.
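As an illustrative sketch (assumed usage), tagging an instance and its volumes at creation time by passing TagSpecifications to RunInstances with the Go SDK; tag keys and values are placeholders:

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

// runTaggedInstance applies the same tags to the instance and its volumes
// at creation time, using resource types from the list above.
func runTaggedInstance(ctx context.Context, client *ec2.Client, amiID string) (*ec2.RunInstancesOutput, error) {
	tags := []types.Tag{{Key: aws.String("project"), Value: aws.String("demo")}}
	return client.RunInstances(ctx, &ec2.RunInstancesInput{
		ImageId:  aws.String(amiID),
		MinCount: aws.Int32(1),
		MaxCount: aws.Int32(1),
		TagSpecifications: []types.TagSpecification{
			{ResourceType: types.ResourceTypeInstance, Tags: tags},
			{ResourceType: types.ResourceTypeVolume, Tags: tags},
		},
	})
}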

", "smithy.api#xmlName": "resourceType" } }, @@ -62949,7 +63017,7 @@ "InstanceCount": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

The number of instances the Covertible Reserved Instance offering can be applied to. This parameter is reserved and cannot \n be specified in a request

" + "smithy.api#documentation": "

The number of instances the Convertible Reserved Instance offering can be applied to. This parameter is reserved and cannot \n be specified in a request.

" } }, "OfferingId": { diff --git a/codegen/sdk-codegen/aws-models/ecr.2015-09-21.json b/codegen/sdk-codegen/aws-models/ecr.2015-09-21.json index bedbd853c48..1739ea4dd5e 100644 --- a/codegen/sdk-codegen/aws-models/ecr.2015-09-21.json +++ b/codegen/sdk-codegen/aws-models/ecr.2015-09-21.json @@ -148,7 +148,7 @@ "name": "ecr" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Amazon Elastic Container Registry\n

Amazon Elastic Container Registry (Amazon ECR) is a managed container image registry service. Customers can use the\n familiar Docker CLI, or their preferred client, to push, pull, and manage images. Amazon ECR\n provides a secure, scalable, and reliable registry for your Docker or Open Container\n Initiative (OCI) images. Amazon ECR supports private repositories with resource-based\n permissions using IAM so that specific users or Amazon EC2 instances can access\n repositories and images.

", + "smithy.api#documentation": "Amazon Elastic Container Registry\n

Amazon Elastic Container Registry (Amazon ECR) is a managed container image registry service. Customers can use the\n familiar Docker CLI, or their preferred client, to push, pull, and manage images. Amazon ECR\n provides a secure, scalable, and reliable registry for your Docker or Open Container\n Initiative (OCI) images. Amazon ECR supports private repositories with resource-based\n permissions using IAM so that specific users or Amazon EC2 instances can access\n repositories and images.

\n

Amazon ECR has service endpoints in each supported Region. For more information, see Amazon ECR endpoints in the\n Amazon Web Services General Reference.

", "smithy.api#title": "Amazon EC2 Container Registry", "smithy.api#xmlNamespace": { "uri": "http://ecr.amazonaws.com/doc/2015-09-21/" @@ -276,7 +276,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the image layers to\n check. If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the image layers to\n check. If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -341,7 +341,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the image to delete.\n If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the image to delete.\n If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -409,7 +409,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the images to describe.\n If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the images to\n describe. If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -519,7 +519,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry to which to upload layers.\n If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry to which to upload layers.\n If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -606,7 +606,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a repository. For more information, see Amazon ECR Repositories in the\n Amazon Elastic Container Registry User Guide.

" + "smithy.api#documentation": "

Creates a repository. For more information, see Amazon ECR repositories in the\n Amazon Elastic Container Registry User Guide.

" } }, "com.amazonaws.ecr#CreateRepositoryRequest": { @@ -691,7 +691,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository.\n If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository.\n If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -837,7 +837,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository policy to\n delete. If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository policy\n to delete. If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -878,7 +878,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository to\n delete. If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository to\n delete. If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -976,7 +976,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository in\n which to describe the image scan findings for. If you do not specify a registry, the\n default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository in\n which to describe the image scan findings for. If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -1096,7 +1096,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository in which\n to describe images. If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository in\n which to describe images. If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -1228,7 +1228,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repositories to be\n described. If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repositories to be\n described. If you do not specify a registry, the default registry is assumed.

" } }, "repositoryNames": { @@ -1289,19 +1289,19 @@ "encryptionType": { "target": "com.amazonaws.ecr#EncryptionType", "traits": { - "smithy.api#documentation": "

The encryption type to use.

\n

If you use the KMS encryption type, the contents of the repository will\n be encrypted using server-side encryption with customer master keys (CMKs) stored in\n AWS KMS. When you use AWS KMS to encrypt your data, you can either use the default AWS\n managed CMK for Amazon ECR, or specify your own CMK, which you already created. For more\n information, see Protecting Data Using Server-Side\n Encryption with CMKs Stored in AWS Key Management Service (SSE-KMS) in the\n Amazon Simple Storage Service Console Developer Guide..

\n

If you use the AES256 encryption type, Amazon ECR uses server-side encryption\n with Amazon S3-managed encryption keys which encrypts the images in the repository using an\n AES-256 encryption algorithm. For more information, see Protecting Data Using\n Server-Side Encryption with Amazon S3-Managed Encryption Keys (SSE-S3) in\n the Amazon Simple Storage Service Console Developer Guide..

", + "smithy.api#documentation": "

The encryption type to use.

\n

If you use the KMS encryption type, the contents of the repository will\n be encrypted using server-side encryption with a Key Management Service (KMS) key. When you\n use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key\n for Amazon ECR, or specify your own KMS key, which you already created. For more\n information, see Protecting data using server-side\n encryption with a KMS key stored in Key Management Service (SSE-KMS) in the\n Amazon Simple Storage Service Console Developer Guide.

\n

If you use the AES256 encryption type, Amazon ECR uses server-side encryption\n with Amazon S3-managed encryption keys, which encrypt the images in the repository using an\n AES-256 encryption algorithm. For more information, see Protecting data using\n server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the\n Amazon Simple Storage Service Console Developer Guide.

", "smithy.api#required": {} } }, "kmsKey": { "target": "com.amazonaws.ecr#KmsKey", "traits": { - "smithy.api#documentation": "

If you use the KMS encryption type, specify the CMK to use for\n encryption. The alias, key ID, or full ARN of the CMK can be specified. The key must\n exist in the same Region as the repository. If no key is specified, the default AWS\n managed CMK for Amazon ECR will be used.

" + "smithy.api#documentation": "

If you use the KMS encryption type, specify the KMS key to use for\n encryption. The alias, key ID, or full ARN of the KMS key can be specified. The key\n must exist in the same Region as the repository. If no key is specified, the default\n Amazon Web Services managed KMS key for Amazon ECR will be used.

" } } }, "traits": { - "smithy.api#documentation": "

The encryption configuration for the repository. This determines how the contents of\n your repository are encrypted at rest.

\n

By default, when no encryption configuration is set or the AES256\n encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption\n keys which encrypts your data at rest using an AES-256 encryption algorithm. This does\n not require any action on your part.

\n

For more control over the encryption of the contents of your repository, you can use\n server-side encryption with customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) to\n encrypt your images. For more information, see Amazon ECR encryption at\n rest in the Amazon Elastic Container Registry User Guide.

" + "smithy.api#documentation": "

The encryption configuration for the repository. This determines how the contents of\n your repository are encrypted at rest.

\n

By default, when no encryption configuration is set or the AES256\n encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption\n keys which encrypts your data at rest using an AES-256 encryption algorithm. This does\n not require any action on your part.

\n

For more control over the encryption of the contents of your repository, you can use\n server-side encryption with a Key Management Service (KMS) key to encrypt your\n images. For more information, see Amazon ECR encryption at\n rest in the Amazon Elastic Container Registry User Guide.
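For illustration (assumed usage, not part of the model), creating a repository encrypted with a customer managed KMS key using the Go SDK; the repository name and key ARN are placeholders:

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

// createEncryptedRepo uses SSE-KMS; omit EncryptionConfiguration (or use
// AES256) to fall back to the S3-managed default described above.
func createEncryptedRepo(ctx context.Context, client *ecr.Client, kmsKeyARN string) (*ecr.CreateRepositoryOutput, error) {
	return client.CreateRepository(ctx, &ecr.CreateRepositoryInput{
		RepositoryName: aws.String("my-app"),
		EncryptionConfiguration: &types.EncryptionConfiguration{
			EncryptionType: types.EncryptionTypeKms,
			KmsKey:         aws.String(kmsKeyARN),
		},
	})
}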

" } }, "com.amazonaws.ecr#EncryptionType": { @@ -1394,7 +1394,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves an authorization token. An authorization token represents your IAM\n authentication credentials and can be used to access any Amazon ECR registry that your IAM\n principal has access to. The authorization token is valid for 12 hours.

\n

The authorizationToken returned is a base64 encoded string that can be\n decoded and used in a docker login command to authenticate to a registry.\n The AWS CLI offers an get-login-password command that simplifies the login\n process. For more information, see Registry\n Authentication in the Amazon Elastic Container Registry User Guide.

" + "smithy.api#documentation": "

Retrieves an authorization token. An authorization token represents your IAM\n authentication credentials and can be used to access any Amazon ECR registry that your IAM\n principal has access to. The authorization token is valid for 12 hours.

\n

The authorizationToken returned is a base64-encoded string that can be\n decoded and used in a docker login command to authenticate to a registry.\n The CLI offers a get-login-password command that simplifies the login\n process. For more information, see Registry\n authentication in the Amazon Elastic Container Registry User Guide.
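A hedged Go SDK sketch of decoding the token; the decoded value is conventionally "AWS:<password>", but treat the exact format check here as an assumption to validate:

package main

import (
	"context"
	"encoding/base64"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
)

// registryCredentials returns the user name and password to pass to
// `docker login` against the proxy endpoint in the same AuthorizationData.
func registryCredentials(ctx context.Context, client *ecr.Client) (string, string, error) {
	out, err := client.GetAuthorizationToken(ctx, &ecr.GetAuthorizationTokenInput{})
	if err != nil {
		return "", "", err
	}
	if len(out.AuthorizationData) == 0 {
		return "", "", fmt.Errorf("no authorization data returned")
	}
	raw, err := base64.StdEncoding.DecodeString(aws.ToString(out.AuthorizationData[0].AuthorizationToken))
	if err != nil {
		return "", "", err
	}
	parts := strings.SplitN(string(raw), ":", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("unexpected token format")
	}
	return parts[0], parts[1], nil
}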

" } }, "com.amazonaws.ecr#GetAuthorizationTokenRegistryIdList": { @@ -1418,7 +1418,7 @@ "smithy.api#deprecated": { "message": "This field is deprecated. The returned authorization token can be used to access any Amazon ECR registry that the IAM principal has access to, specifying a registry ID doesn't change the permissions scope of the authorization token." }, - "smithy.api#documentation": "

A list of AWS account IDs that are associated with the registries for which to get\n AuthorizationData objects. If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

A list of Amazon Web Services account IDs that are associated with the registries for which to get\n AuthorizationData objects. If you do not specify a registry, the default registry is assumed.

" } } } @@ -1460,7 +1460,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer. You\n can only get URLs for image layers that are referenced in an image.

\n

When an image is pulled, the GetDownloadUrlForLayer API is called once per image layer\n that is not already cached.

\n \n

This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

\n
" + "smithy.api#documentation": "

Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer. You can\n only get URLs for image layers that are referenced in an image.

\n

When an image is pulled, the GetDownloadUrlForLayer API is called once per image layer\n that is not already cached.

\n \n

This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

\n
" } }, "com.amazonaws.ecr#GetDownloadUrlForLayerRequest": { @@ -1469,7 +1469,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the image layer to\n download. If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the image layer to\n download. If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -1597,7 +1597,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository.\n If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository.\n If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -1686,7 +1686,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository.\n If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository.\n If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -1803,7 +1803,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository.\n If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository.\n If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -1844,7 +1844,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry containing the image.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry containing the image.

" } }, "repositoryName": { @@ -1917,7 +1917,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry to which this image belongs.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry to which this image belongs.

" } }, "repositoryName": { @@ -2257,7 +2257,7 @@ "scanOnPush": { "target": "com.amazonaws.ecr#ScanOnPushFlag", "traits": { - "smithy.api#documentation": "

The setting that determines whether images are scanned after being pushed to a\n repository. If set to true, images will be scanned after being pushed. If\n this parameter is not specified, it will default to false and images will\n not be scanned unless a scan is manually started with the StartImageScan API.

" + "smithy.api#documentation": "

The setting that determines whether images are scanned after being pushed to a\n repository. If set to true, images will be scanned after being pushed. If\n this parameter is not specified, it will default to false and images will\n not be scanned unless a scan is manually started with the StartImageScan API.
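Illustrative only (the field shapes are assumed for this SDK generation): enabling scan-on-push for a repository with the Go SDK:

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

// enableScanOnPush turns on automatic scanning of newly pushed images.
func enableScanOnPush(ctx context.Context, client *ecr.Client, repo string) error {
	_, err := client.PutImageScanningConfiguration(ctx, &ecr.PutImageScanningConfigurationInput{
		RepositoryName:             aws.String(repo),
		ImageScanningConfiguration: &types.ImageScanningConfiguration{ScanOnPush: true},
	})
	return err
}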

" } } }, @@ -2345,7 +2345,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry to which you intend to upload layers.\n If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry to which you intend to upload\n layers. If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -2467,7 +2467,7 @@ "kmsError": { "target": "com.amazonaws.ecr#KmsError", "traits": { - "smithy.api#documentation": "

The error code returned by AWS KMS.

" + "smithy.api#documentation": "

The error code returned by KMS.

" } } }, @@ -2550,7 +2550,7 @@ "com.amazonaws.ecr#LayerDigest": { "type": "string", "traits": { - "smithy.api#pattern": "[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+" + "smithy.api#pattern": "^[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+$" } }, "com.amazonaws.ecr#LayerDigestList": { @@ -2865,7 +2865,7 @@ } }, "traits": { - "smithy.api#documentation": "

The operation did not succeed because it would have exceeded a service limit for your\n account. For more information, see Amazon ECR Service Quotas in\n the Amazon Elastic Container Registry User Guide.

", + "smithy.api#documentation": "

The operation did not succeed because it would have exceeded a service limit for your\n account. For more information, see Amazon ECR service quotas in\n the Amazon Elastic Container Registry User Guide.

", "smithy.api#error": "client" } }, @@ -2918,7 +2918,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository in which\n to list images. If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository in\n which to list images. If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -3104,7 +3104,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository in which\n to put the image. If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository in\n which to put the image. If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -3181,7 +3181,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository in\n which to update the image scanning configuration setting.\n If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository in\n which to update the image scanning configuration setting.\n If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -3243,7 +3243,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the image tag mutability settings for the specified repository. For more\n information, see Image Tag\n Mutability in the Amazon Elastic Container Registry User Guide.

" + "smithy.api#documentation": "

Updates the image tag mutability settings for the specified repository. For more\n information, see Image tag\n mutability in the Amazon Elastic Container Registry User Guide.

" } }, "com.amazonaws.ecr#PutImageTagMutabilityRequest": { @@ -3252,7 +3252,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository in which\n to update the image tag mutability settings. If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository in\n which to update the image tag mutability settings. If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -3314,7 +3314,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates or updates the lifecycle policy for the specified repository. For more\n information, see Lifecycle Policy\n Template.

" + "smithy.api#documentation": "

Creates or updates the lifecycle policy for the specified repository. For more\n information, see Lifecycle policy\n template.

" } }, "com.amazonaws.ecr#PutLifecyclePolicyRequest": { @@ -3323,7 +3323,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository. If you\n do\u2028 not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository. If you\n do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -3382,7 +3382,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates or updates the permissions policy for your registry.

\n

A registry policy is used to specify permissions for another AWS account and is used\n when configuring cross-account replication. For more information, see Registry permissions in the Amazon Elastic Container Registry User Guide.

" + "smithy.api#documentation": "

Creates or updates the permissions policy for your registry.

\n

A registry policy is used to specify permissions for another Amazon Web Services account and is used\n when configuring cross-account replication. For more information, see Registry permissions in the Amazon Elastic Container Registry User Guide.

" } }, "com.amazonaws.ecr#PutRegistryPolicyRequest": { @@ -3434,7 +3434,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates or updates the replication configuration for a registry. The existing\n replication configuration for a repository can be retrieved with the DescribeRegistry API action. The first time the\n PutReplicationConfiguration API is called, a service-linked IAM role is created in\n your account for the replication process. For more information, see Using\n Service-Linked Roles for Amazon ECR in the\n Amazon Elastic Container Registry User Guide.

\n \n

When configuring cross-account replication, the destination account must grant the\n source account permission to replicate. This permission is controlled using a\n registry permissions policy. For more information, see PutRegistryPolicy.

\n
" + "smithy.api#documentation": "

Creates or updates the replication configuration for a registry. The existing\n replication configuration for a repository can be retrieved with the DescribeRegistry API action. The first time the\n PutReplicationConfiguration API is called, a service-linked IAM role is created in\n your account for the replication process. For more information, see Using\n service-linked roles for Amazon ECR in the\n Amazon Elastic Container Registry User Guide.

\n \n

When configuring cross-account replication, the destination account must grant the\n source account permission to replicate. This permission is controlled using a\n registry permissions policy. For more information, see PutRegistryPolicy.

\n
" } }, "com.amazonaws.ecr#PutReplicationConfigurationRequest": { @@ -3479,13 +3479,13 @@ "min": 2, "max": 25 }, - "smithy.api#pattern": "[0-9a-z-]{2,25}" + "smithy.api#pattern": "^[0-9a-z-]{2,25}$" } }, "com.amazonaws.ecr#RegistryId": { "type": "string", "traits": { - "smithy.api#pattern": "[0-9]{12}" + "smithy.api#pattern": "^[0-9]{12}$" } }, "com.amazonaws.ecr#RegistryPolicyNotFoundException": { @@ -3591,13 +3591,13 @@ "repositoryArn": { "target": "com.amazonaws.ecr#Arn", "traits": { - "smithy.api#documentation": "

The Amazon Resource Name (ARN) that identifies the repository. The ARN contains the arn:aws:ecr namespace, followed by the region of the\n repository, AWS account ID of the repository owner, repository namespace, and repository name.\n For example, arn:aws:ecr:region:012345678910:repository/test.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that identifies the repository. The ARN contains the arn:aws:ecr namespace, followed by the region of the\n repository, Amazon Web Services account ID of the repository owner, repository namespace, and repository name.\n For example, arn:aws:ecr:region:012345678910:repository/test.

" } }, "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository.

" } }, "repositoryName": { @@ -3666,7 +3666,7 @@ "min": 2, "max": 256 }, - "smithy.api#pattern": "(?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*" + "smithy.api#pattern": "^(?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*$" } }, "com.amazonaws.ecr#RepositoryNameList": { @@ -3810,7 +3810,7 @@ } ], "traits": { - "smithy.api#documentation": "

Applies a repository policy to the specified repository to control access permissions.\n For more information, see Amazon ECR Repository\n Policies in the Amazon Elastic Container Registry User Guide.

" + "smithy.api#documentation": "

Applies a repository policy to the specified repository to control access permissions.\n For more information, see Amazon ECR repository\n policies in the Amazon Elastic Container Registry User Guide.

" } }, "com.amazonaws.ecr#SetRepositoryPolicyRequest": { @@ -3819,7 +3819,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository.\n If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository.\n If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -3832,7 +3832,7 @@ "policyText": { "target": "com.amazonaws.ecr#RepositoryPolicyText", "traits": { - "smithy.api#documentation": "

The JSON repository policy text to apply to the repository. For more information, see\n Amazon ECR Repository\n Policies in the Amazon Elastic Container Registry User Guide.

", + "smithy.api#documentation": "

The JSON repository policy text to apply to the repository. For more information, see\n Amazon ECR repository\n policies in the Amazon Elastic Container Registry User Guide.

", "smithy.api#required": {} } }, @@ -3905,7 +3905,7 @@ } ], "traits": { - "smithy.api#documentation": "

Starts an image vulnerability scan. An image scan can only be started once per day on\n an individual image. This limit includes if an image was scanned on initial push. For\n more information, see Image Scanning in the\n Amazon Elastic Container Registry User Guide.

" + "smithy.api#documentation": "

Starts an image vulnerability scan. An image scan can only be started once per 24\n hours on an individual image. This limit includes if an image was scanned on initial\n push. For more information, see Image scanning in the\n Amazon Elastic Container Registry User Guide.

" } }, "com.amazonaws.ecr#StartImageScanRequest": { @@ -3914,7 +3914,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository in\n which to start an image scan request. If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository in\n which to start an image scan request. If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -3993,7 +3993,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry that contains the repository.\n If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry that contains the repository.\n If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { @@ -4228,7 +4228,7 @@ "com.amazonaws.ecr#UploadId": { "type": "string", "traits": { - "smithy.api#pattern": "[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" + "smithy.api#pattern": "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" } }, "com.amazonaws.ecr#UploadLayerPart": { @@ -4272,7 +4272,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

The AWS account ID associated with the registry to which you are uploading layer\n parts. If you do not specify a registry, the default registry is assumed.

" + "smithy.api#documentation": "

The Amazon Web Services account ID associated with the registry to which you are uploading layer\n parts. If you do not specify a registry, the default registry is assumed.

" } }, "repositoryName": { diff --git a/codegen/sdk-codegen/aws-models/iot.2015-05-28.json b/codegen/sdk-codegen/aws-models/iot.2015-05-28.json index a35b641ee57..6a4c973fc23 100644 --- a/codegen/sdk-codegen/aws-models/iot.2015-05-28.json +++ b/codegen/sdk-codegen/aws-models/iot.2015-05-28.json @@ -979,7 +979,7 @@ "elasticsearch": { "target": "com.amazonaws.iot#ElasticsearchAction", "traits": { - "smithy.api#documentation": "

Write data to an Amazon Elasticsearch Service domain.

" + "smithy.api#documentation": "

Write data to an Amazon Elasticsearch Service domain.

\n \n

This action is deprecated. Use the OpenSearch action instead.

\n
" } }, "salesforce": { @@ -1029,6 +1029,12 @@ "traits": { "smithy.api#documentation": "

Send messages to an Amazon Managed Streaming for Apache Kafka (Amazon MSK) or self-managed Apache Kafka cluster.

" } + }, + "openSearch": { + "target": "com.amazonaws.iot#OpenSearchAction", + "traits": { + "smithy.api#documentation": "

Write data to an Amazon OpenSearch Service domain.

" + } } }, "traits": { @@ -13118,7 +13124,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes an action that writes data to an Amazon Elasticsearch Service\n domain.

" + "smithy.api#documentation": "

Describes an action that writes data to an Amazon Elasticsearch Service\n domain.

\n \n

This action is deprecated. Use the OpenSearch action instead.

\n
" } }, "com.amazonaws.iot#ElasticsearchEndpoint": { @@ -22007,6 +22013,49 @@ "target": "com.amazonaws.iot#OTAUpdateSummary" } }, + "com.amazonaws.iot#OpenSearchAction": { + "type": "structure", + "members": { + "roleArn": { + "target": "com.amazonaws.iot#AwsArn", + "traits": { + "smithy.api#documentation": "

The IAM role ARN that has access to OpenSearch.

", + "smithy.api#required": {} + } + }, + "endpoint": { + "target": "com.amazonaws.iot#ElasticsearchEndpoint", + "traits": { + "smithy.api#documentation": "

The endpoint of your OpenSearch domain.

", + "smithy.api#required": {} + } + }, + "index": { + "target": "com.amazonaws.iot#ElasticsearchIndex", + "traits": { + "smithy.api#documentation": "

The OpenSearch index where you want to store your data.

", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.iot#ElasticsearchType", + "traits": { + "smithy.api#documentation": "

The type of document you are storing.

", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.iot#ElasticsearchId", + "traits": { + "smithy.api#documentation": "

The unique identifier for the document you are storing.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes an action that writes data to an Amazon OpenSearch Service\n domain.
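A sketch of how the generated Go type might be populated for a topic rule action; the Go field names are assumed from the Smithy member names above, and the index, type, and document ID values are placeholders:

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/iot/types"
)

// openSearchAction builds the action that writes matching messages into an
// Amazon OpenSearch Service domain.
func openSearchAction(roleARN, endpoint string) types.Action {
	return types.Action{
		OpenSearch: &types.OpenSearchAction{
			RoleArn:  aws.String(roleARN),  // IAM role allowed to write to the domain
			Endpoint: aws.String(endpoint), // e.g. "https://search-example.us-east-1.es.amazonaws.com"
			Index:    aws.String("device-telemetry"),
			Type:     aws.String("_doc"),
			Id:       aws.String("${newuuid()}"), // substitution template for the document ID
		},
	}
}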

" + } + }, "com.amazonaws.iot#OptionalVersion": { "type": "long", "traits": { @@ -26185,7 +26234,7 @@ "disconnectReason": { "target": "com.amazonaws.iot#DisconnectReason", "traits": { - "smithy.api#documentation": "

The reason why the client is disconnected.

" + "smithy.api#documentation": "

The reason why the client is disconnected. If the thing has been disconnected for approximately an hour, the disconnectReason value might be missing.

" } } }, diff --git a/codegen/sdk-codegen/aws-models/kafkaconnect.2021-09-14.json b/codegen/sdk-codegen/aws-models/kafkaconnect.2021-09-14.json new file mode 100644 index 00000000000..6804798fb64 --- /dev/null +++ b/codegen/sdk-codegen/aws-models/kafkaconnect.2021-09-14.json @@ -0,0 +1,2727 @@ +{ + "smithy": "1.0", + "shapes": { + "com.amazonaws.kafkaconnect#ApacheKafkaCluster": { + "type": "structure", + "members": { + "bootstrapServers": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The bootstrap servers of the cluster.

", + "smithy.api#required": {} + } + }, + "vpc": { + "target": "com.amazonaws.kafkaconnect#Vpc", + "traits": { + "smithy.api#documentation": "

Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the Apache Kafka cluster to which the connector is connected.

" + } + }, + "com.amazonaws.kafkaconnect#ApacheKafkaClusterDescription": { + "type": "structure", + "members": { + "bootstrapServers": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The bootstrap servers of the cluster.

" + } + }, + "vpc": { + "target": "com.amazonaws.kafkaconnect#VpcDescription", + "traits": { + "smithy.api#documentation": "

Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The description of the Apache Kafka cluster to which the connector is connected.

" + } + }, + "com.amazonaws.kafkaconnect#AutoScaling": { + "type": "structure", + "members": { + "maxWorkerCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "traits": { + "smithy.api#documentation": "

The maximum number of workers allocated to the connector.

", + "smithy.api#required": {} + } + }, + "mcuCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max8", + "traits": { + "smithy.api#documentation": "

The number of MSK Connect Units (MCUs) allocated to each connector worker. The valid values are 1, 2, 4, and 8.

", + "smithy.api#required": {} + } + }, + "minWorkerCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "traits": { + "smithy.api#documentation": "

The minimum number of workers allocated to the connector.

", + "smithy.api#required": {} + } + }, + "scaleInPolicy": { + "target": "com.amazonaws.kafkaconnect#ScaleInPolicy", + "traits": { + "smithy.api#documentation": "

The scale-in policy for the connector.

" + } + }, + "scaleOutPolicy": { + "target": "com.amazonaws.kafkaconnect#ScaleOutPolicy", + "traits": { + "smithy.api#documentation": "

The scale-out policy for the connector.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specifies how the connector scales.
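A sketch of an auto scaled capacity block as it might look in the generated Go client; the field names are assumed from the Smithy members above, and the worker and MCU counts are arbitrary examples:

package main

import (
	"github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types"
)

// autoScaledCapacity lets MSK Connect scale the connector between one and
// four workers, each with two MCUs; the scale-in and scale-out policies are
// optional in this shape and omitted here.
func autoScaledCapacity() types.Capacity {
	return types.Capacity{
		AutoScaling: &types.AutoScaling{
			MinWorkerCount: 1,
			MaxWorkerCount: 4,
			McuCount:       2, // valid values: 1, 2, 4, 8
		},
	}
}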

" + } + }, + "com.amazonaws.kafkaconnect#AutoScalingDescription": { + "type": "structure", + "members": { + "maxWorkerCount": { + "target": "com.amazonaws.kafkaconnect#__integer", + "traits": { + "smithy.api#documentation": "

The maximum number of workers allocated to the connector.

" + } + }, + "mcuCount": { + "target": "com.amazonaws.kafkaconnect#__integer", + "traits": { + "smithy.api#documentation": "

The number of MSK Connect Units (MCUs) allocated to each connector worker. The valid values are 1, 2, 4, and 8.

" + } + }, + "minWorkerCount": { + "target": "com.amazonaws.kafkaconnect#__integer", + "traits": { + "smithy.api#documentation": "

The minimum number of workers allocated to the connector.

" + } + }, + "scaleInPolicy": { + "target": "com.amazonaws.kafkaconnect#ScaleInPolicyDescription", + "traits": { + "smithy.api#documentation": "

The scale-in policy for the connector.

" + } + }, + "scaleOutPolicy": { + "target": "com.amazonaws.kafkaconnect#ScaleOutPolicyDescription", + "traits": { + "smithy.api#documentation": "

The scale-out policy for the connector.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the auto scaling parameters for the connector.

" + } + }, + "com.amazonaws.kafkaconnect#AutoScalingUpdate": { + "type": "structure", + "members": { + "maxWorkerCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "traits": { + "smithy.api#documentation": "

The target maximum number of workers allocated to the connector.

", + "smithy.api#required": {} + } + }, + "mcuCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max8", + "traits": { + "smithy.api#documentation": "

The target number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1, 2, 4, and 8.

", + "smithy.api#required": {} + } + }, + "minWorkerCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "traits": { + "smithy.api#documentation": "

The target minimum number of workers allocated to the connector.

", + "smithy.api#required": {} + } + }, + "scaleInPolicy": { + "target": "com.amazonaws.kafkaconnect#ScaleInPolicyUpdate", + "traits": { + "smithy.api#documentation": "

The target scale-in policy for the connector.

", + "smithy.api#required": {} + } + }, + "scaleOutPolicy": { + "target": "com.amazonaws.kafkaconnect#ScaleOutPolicyUpdate", + "traits": { + "smithy.api#documentation": "

The target scale-out policy for the connector.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The updates to the auto scaling parameters for the connector.

" + } + }, + "com.amazonaws.kafkaconnect#BadRequestException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "traits": { + "smithy.api#documentation": "

HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.

", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.kafkaconnect#Capacity": { + "type": "structure", + "members": { + "autoScaling": { + "target": "com.amazonaws.kafkaconnect#AutoScaling", + "traits": { + "smithy.api#documentation": "

Information about the auto scaling parameters for the connector.

" + } + }, + "provisionedCapacity": { + "target": "com.amazonaws.kafkaconnect#ProvisionedCapacity", + "traits": { + "smithy.api#documentation": "

Details about a fixed capacity allocated to a connector.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the capacity of the connector, whether it is auto scaled or provisioned.

" + } + }, + "com.amazonaws.kafkaconnect#CapacityDescription": { + "type": "structure", + "members": { + "autoScaling": { + "target": "com.amazonaws.kafkaconnect#AutoScalingDescription", + "traits": { + "smithy.api#documentation": "

Describes the connector's auto scaling capacity.

" + } + }, + "provisionedCapacity": { + "target": "com.amazonaws.kafkaconnect#ProvisionedCapacityDescription", + "traits": { + "smithy.api#documentation": "

Describes a connector's provisioned capacity.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A description of the connector's capacity.

" + } + }, + "com.amazonaws.kafkaconnect#CapacityUpdate": { + "type": "structure", + "members": { + "autoScaling": { + "target": "com.amazonaws.kafkaconnect#AutoScalingUpdate", + "traits": { + "smithy.api#documentation": "

The target auto scaling setting.

" + } + }, + "provisionedCapacity": { + "target": "com.amazonaws.kafkaconnect#ProvisionedCapacityUpdate", + "traits": { + "smithy.api#documentation": "

The target settings for provisioned capacity.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The target capacity for the connector. The capacity can be auto scaled or provisioned.

" + } + }, + "com.amazonaws.kafkaconnect#CloudWatchLogsLogDelivery": { + "type": "structure", + "members": { + "enabled": { + "target": "com.amazonaws.kafkaconnect#__boolean", + "traits": { + "smithy.api#documentation": "

Whether log delivery to Amazon CloudWatch Logs is enabled.

", + "smithy.api#required": {} + } + }, + "logGroup": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The name of the CloudWatch log group that is the destination for log delivery.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The settings for delivering connector logs to Amazon CloudWatch Logs.

" + } + }, + "com.amazonaws.kafkaconnect#CloudWatchLogsLogDeliveryDescription": { + "type": "structure", + "members": { + "enabled": { + "target": "com.amazonaws.kafkaconnect#__boolean", + "traits": { + "smithy.api#documentation": "

Whether log delivery to Amazon CloudWatch Logs is enabled.

" + } + }, + "logGroup": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The name of the CloudWatch log group that is the destination for log delivery.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A description of the log delivery settings.

" + } + }, + "com.amazonaws.kafkaconnect#ConflictException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "traits": { + "smithy.api#documentation": "

HTTP Status Code 409: Conflict. A resource with this name already exists. Retry your request with another name.

", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.kafkaconnect#ConnectorState": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "RUNNING", + "name": "RUNNING" + }, + { + "value": "CREATING", + "name": "CREATING" + }, + { + "value": "UPDATING", + "name": "UPDATING" + }, + { + "value": "DELETING", + "name": "DELETING" + }, + { + "value": "FAILED", + "name": "FAILED" + } + ] + } + }, + "com.amazonaws.kafkaconnect#ConnectorSummary": { + "type": "structure", + "members": { + "capacity": { + "target": "com.amazonaws.kafkaconnect#CapacityDescription", + "traits": { + "smithy.api#documentation": "

The connector's compute capacity settings.

" + } + }, + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector.

" + } + }, + "connectorDescription": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The description of the connector.

" + } + }, + "connectorName": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The name of the connector.

" + } + }, + "connectorState": { + "target": "com.amazonaws.kafkaconnect#ConnectorState", + "traits": { + "smithy.api#documentation": "

The state of the connector.

" + } + }, + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "

The time that the connector was created.

" + } + }, + "currentVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The current version of the connector.

" + } + }, + "kafkaCluster": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterDescription", + "traits": { + "smithy.api#documentation": "

The details of the Apache Kafka cluster to which the connector is connected.

" + } + }, + "kafkaClusterClientAuthentication": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationDescription", + "traits": { + "smithy.api#documentation": "

The type of client authentication used to connect to the Apache Kafka cluster. The value is NONE when no client authentication is used.

" + } + }, + "kafkaClusterEncryptionInTransit": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterEncryptionInTransitDescription", + "traits": { + "smithy.api#documentation": "

Details of encryption in transit to the Apache Kafka cluster.

" + } + }, + "kafkaConnectVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.

" + } + }, + "logDelivery": { + "target": "com.amazonaws.kafkaconnect#LogDeliveryDescription", + "traits": { + "smithy.api#documentation": "

The settings for delivering connector logs to Amazon CloudWatch Logs.

" + } + }, + "plugins": { + "target": "com.amazonaws.kafkaconnect#__listOfPluginDescription", + "traits": { + "smithy.api#documentation": "

Specifies which plugins were used for this connector.

" + } + }, + "serviceExecutionRoleArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon Web Services resources.

" + } + }, + "workerConfiguration": { + "target": "com.amazonaws.kafkaconnect#WorkerConfigurationDescription", + "traits": { + "smithy.api#documentation": "

The worker configurations that are in use with the connector.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Summary of a connector.

" + } + }, + "com.amazonaws.kafkaconnect#CreateConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#CreateConnectorRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#CreateConnectorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ConflictException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a connector using the specified properties.

", + "smithy.api#http": { + "method": "POST", + "uri": "/v1/connectors", + "code": 200 + } + } + }, + "com.amazonaws.kafkaconnect#CreateConnectorRequest": { + "type": "structure", + "members": { + "capacity": { + "target": "com.amazonaws.kafkaconnect#Capacity", + "traits": { + "smithy.api#documentation": "

Information about the capacity allocated to the connector. Exactly one of the two properties must be specified.

", + "smithy.api#required": {} + } + }, + "connectorConfiguration": { + "target": "com.amazonaws.kafkaconnect#__mapOf__string", + "traits": { + "smithy.api#documentation": "

A map of keys to values that represent the configuration for the connector.

", + "smithy.api#required": {} + } + }, + "connectorDescription": { + "target": "com.amazonaws.kafkaconnect#__stringMax1024", + "traits": { + "smithy.api#documentation": "

A summary description of the connector.

" + } + }, + "connectorName": { + "target": "com.amazonaws.kafkaconnect#__stringMin1Max128", + "traits": { + "smithy.api#documentation": "

The name of the connector.

", + "smithy.api#required": {} + } + }, + "kafkaCluster": { + "target": "com.amazonaws.kafkaconnect#KafkaCluster", + "traits": { + "smithy.api#documentation": "

Specifies which Apache Kafka cluster to connect to.

", + "smithy.api#required": {} + } + }, + "kafkaClusterClientAuthentication": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterClientAuthentication", + "traits": { + "smithy.api#documentation": "

Details of the client authentication used by the Apache Kafka cluster.

", + "smithy.api#required": {} + } + }, + "kafkaClusterEncryptionInTransit": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterEncryptionInTransit", + "traits": { + "smithy.api#documentation": "

Details of encryption in transit to the Apache Kafka cluster.

", + "smithy.api#required": {} + } + }, + "kafkaConnectVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.

", + "smithy.api#required": {} + } + }, + "logDelivery": { + "target": "com.amazonaws.kafkaconnect#LogDelivery", + "traits": { + "smithy.api#documentation": "

Details about log delivery.

" + } + }, + "plugins": { + "target": "com.amazonaws.kafkaconnect#__listOfPlugin", + "traits": { + "smithy.api#documentation": "

Specifies which plugins to use for the connector.

", + "smithy.api#required": {} + } + }, + "serviceExecutionRoleArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depend on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.

", + "smithy.api#required": {} + } + }, + "workerConfiguration": { + "target": "com.amazonaws.kafkaconnect#WorkerConfiguration", + "traits": { + "smithy.api#documentation": "

Specifies which worker configuration to use with the connector.

" + } + } + } + }, + "com.amazonaws.kafkaconnect#CreateConnectorResponse": { + "type": "structure", + "members": { + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that Amazon assigned to the connector.

" + } + }, + "connectorName": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The name of the connector.

" + } + }, + "connectorState": { + "target": "com.amazonaws.kafkaconnect#ConnectorState", + "traits": { + "smithy.api#documentation": "

The state of the connector.

" + } + } + } + }, + "com.amazonaws.kafkaconnect#CreateCustomPlugin": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#CreateCustomPluginRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#CreateCustomPluginResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ConflictException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a custom plugin using the specified properties.

", + "smithy.api#http": { + "method": "POST", + "uri": "/v1/custom-plugins", + "code": 200 + } + } + }, + "com.amazonaws.kafkaconnect#CreateCustomPluginRequest": { + "type": "structure", + "members": { + "contentType": { + "target": "com.amazonaws.kafkaconnect#CustomPluginContentType", + "traits": { + "smithy.api#documentation": "

The type of the plugin file.

", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.kafkaconnect#__stringMax1024", + "traits": { + "smithy.api#documentation": "

A summary description of the custom plugin.

" + } + }, + "location": { + "target": "com.amazonaws.kafkaconnect#CustomPluginLocation", + "traits": { + "smithy.api#documentation": "

Information about the location of a custom plugin.

", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.kafkaconnect#__stringMin1Max128", + "traits": { + "smithy.api#documentation": "

The name of the custom plugin.

", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.kafkaconnect#CreateCustomPluginResponse": { + "type": "structure", + "members": { + "customPluginArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that Amazon assigned to the custom plugin.

" + } + }, + "customPluginState": { + "target": "com.amazonaws.kafkaconnect#CustomPluginState", + "traits": { + "smithy.api#documentation": "

The state of the custom plugin.

" + } + }, + "name": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The name of the custom plugin.

" + } + }, + "revision": { + "target": "com.amazonaws.kafkaconnect#__long", + "traits": { + "smithy.api#documentation": "

The revision of the custom plugin.

" + } + } + } + }, + "com.amazonaws.kafkaconnect#CreateWorkerConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#CreateWorkerConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#CreateWorkerConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ConflictException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a worker configuration using the specified properties.

", + "smithy.api#http": { + "method": "POST", + "uri": "/v1/worker-configurations", + "code": 200 + } + } + }, + "com.amazonaws.kafkaconnect#CreateWorkerConfigurationRequest": { + "type": "structure", + "members": { + "description": { + "target": "com.amazonaws.kafkaconnect#__stringMax1024", + "traits": { + "smithy.api#documentation": "

A summary description of the worker configuration.

" + } + }, + "name": { + "target": "com.amazonaws.kafkaconnect#__stringMin1Max128", + "traits": { + "smithy.api#documentation": "

The name of the worker configuration.

", + "smithy.api#required": {} + } + }, + "propertiesFileContent": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

Base64 encoded contents of the connect-distributed.properties file.

", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.kafkaconnect#CreateWorkerConfigurationResponse": { + "type": "structure", + "members": { + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "

The time that the worker configuration was created.

" + } + }, + "latestRevision": { + "target": "com.amazonaws.kafkaconnect#WorkerConfigurationRevisionSummary", + "traits": { + "smithy.api#documentation": "

The latest revision of the worker configuration.

" + } + }, + "name": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The name of the worker configuration.

" + } + }, + "workerConfigurationArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) that Amazon assigned to the worker configuration.

" + } + } + } + }, + "com.amazonaws.kafkaconnect#CustomPlugin": { + "type": "structure", + "members": { + "customPluginArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the custom plugin.

", + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.kafkaconnect#__longMin1", + "traits": { + "smithy.api#documentation": "

The revision of the custom plugin.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A plugin is an Amazon Web Services resource that contains the code that defines a connector's logic.

" + } + }, + "com.amazonaws.kafkaconnect#CustomPluginContentType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "JAR", + "name": "JAR" + }, + { + "value": "ZIP", + "name": "ZIP" + } + ] + } + }, + "com.amazonaws.kafkaconnect#CustomPluginDescription": { + "type": "structure", + "members": { + "customPluginArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the custom plugin.

" + } + }, + "revision": { + "target": "com.amazonaws.kafkaconnect#__long", + "traits": { + "smithy.api#documentation": "

The revision of the custom plugin.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about a custom plugin.

" + } + }, + "com.amazonaws.kafkaconnect#CustomPluginFileDescription": { + "type": "structure", + "members": { + "fileMd5": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The hex-encoded MD5 checksum of the custom plugin file. You can use it to validate the file.

" + } + }, + "fileSize": { + "target": "com.amazonaws.kafkaconnect#__long", + "traits": { + "smithy.api#documentation": "

The size in bytes of the custom plugin file. You can use it to validate the file.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about a custom plugin file.

" + } + }, + "com.amazonaws.kafkaconnect#CustomPluginLocation": { + "type": "structure", + "members": { + "s3Location": { + "target": "com.amazonaws.kafkaconnect#S3Location", + "traits": { + "smithy.api#documentation": "

The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin file stored in Amazon S3.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the location of a custom plugin.

" + } + }, + "com.amazonaws.kafkaconnect#CustomPluginLocationDescription": { + "type": "structure", + "members": { + "s3Location": { + "target": "com.amazonaws.kafkaconnect#S3LocationDescription", + "traits": { + "smithy.api#documentation": "

The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin file stored in Amazon S3.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the location of a custom plugin.

" + } + }, + "com.amazonaws.kafkaconnect#CustomPluginRevisionSummary": { + "type": "structure", + "members": { + "contentType": { + "target": "com.amazonaws.kafkaconnect#CustomPluginContentType", + "traits": { + "smithy.api#documentation": "

The format of the plugin file.

" + } + }, + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "

The time that the custom plugin was created.

" + } + }, + "description": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The description of the custom plugin.

" + } + }, + "fileDescription": { + "target": "com.amazonaws.kafkaconnect#CustomPluginFileDescription", + "traits": { + "smithy.api#documentation": "

Details about the custom plugin file.

" + } + }, + "location": { + "target": "com.amazonaws.kafkaconnect#CustomPluginLocationDescription", + "traits": { + "smithy.api#documentation": "

Information about the location of the custom plugin.

" + } + }, + "revision": { + "target": "com.amazonaws.kafkaconnect#__long", + "traits": { + "smithy.api#documentation": "

The revision of the custom plugin.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about the revision of a custom plugin.

" + } + }, + "com.amazonaws.kafkaconnect#CustomPluginState": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATING", + "name": "CREATING" + }, + { + "value": "CREATE_FAILED", + "name": "CREATE_FAILED" + }, + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "UPDATING", + "name": "UPDATING" + }, + { + "value": "UPDATE_FAILED", + "name": "UPDATE_FAILED" + }, + { + "value": "DELETING", + "name": "DELETING" + } + ] + } + }, + "com.amazonaws.kafkaconnect#CustomPluginSummary": { + "type": "structure", + "members": { + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "

The time that the custom plugin was created.

" + } + }, + "customPluginArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the custom plugin.

" + } + }, + "customPluginState": { + "target": "com.amazonaws.kafkaconnect#CustomPluginState", + "traits": { + "smithy.api#documentation": "

The state of the custom plugin.

" + } + }, + "description": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

A description of the custom plugin.

" + } + }, + "latestRevision": { + "target": "com.amazonaws.kafkaconnect#CustomPluginRevisionSummary", + "traits": { + "smithy.api#documentation": "

The latest revision of the custom plugin.

" + } + }, + "name": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The name of the custom plugin.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A summary of the custom plugin.

" + } + }, + "com.amazonaws.kafkaconnect#DeleteConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#DeleteConnectorRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#DeleteConnectorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the specified connector.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/v1/connectors/{connectorArn}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.kafkaconnect#DeleteConnectorRequest": { + "type": "structure", + "members": { + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector that you want to delete.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "currentVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The current version of the connector that you want to delete.

", + "smithy.api#httpQuery": "currentVersion" + } + } + } + }, + "com.amazonaws.kafkaconnect#DeleteConnectorResponse": { + "type": "structure", + "members": { + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector that you requested to delete.

" + } + }, + "connectorState": { + "target": "com.amazonaws.kafkaconnect#ConnectorState", + "traits": { + "smithy.api#documentation": "

The state of the connector that you requested to delete.

" + } + } + } + }, + "com.amazonaws.kafkaconnect#DescribeConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#DescribeConnectorRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#DescribeConnectorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns summary information about the connector.

", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/connectors/{connectorArn}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.kafkaconnect#DescribeConnectorRequest": { + "type": "structure", + "members": { + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector that you want to describe.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.kafkaconnect#DescribeConnectorResponse": { + "type": "structure", + "members": { + "capacity": { + "target": "com.amazonaws.kafkaconnect#CapacityDescription", + "traits": { + "smithy.api#documentation": "

Information about the capacity of the connector, whether it is auto scaled or provisioned.

" + } + }, + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector.

" + } + }, + "connectorConfiguration": { + "target": "com.amazonaws.kafkaconnect#__mapOf__string", + "traits": { + "smithy.api#documentation": "

A map of keys to values that represent the configuration for the connector.

" + } + }, + "connectorDescription": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

A summary description of the connector.

" + } + }, + "connectorName": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The name of the connector.

" + } + }, + "connectorState": { + "target": "com.amazonaws.kafkaconnect#ConnectorState", + "traits": { + "smithy.api#documentation": "

The state of the connector.

" + } + }, + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "

The time the connector was created.

" + } + }, + "currentVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The current version of the connector.

" + } + }, + "kafkaCluster": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterDescription", + "traits": { + "smithy.api#documentation": "

The Apache Kafka cluster that the connector is connected to.

" + } + }, + "kafkaClusterClientAuthentication": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationDescription", + "traits": { + "smithy.api#documentation": "

The type of client authentication used to connect to the Apache Kafka cluster. The value is NONE when no client authentication is used.

" + } + }, + "kafkaClusterEncryptionInTransit": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterEncryptionInTransitDescription", + "traits": { + "smithy.api#documentation": "

Details of encryption in transit to the Apache Kafka cluster.

" + } + }, + "kafkaConnectVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.

" + } + }, + "logDelivery": { + "target": "com.amazonaws.kafkaconnect#LogDeliveryDescription", + "traits": { + "smithy.api#documentation": "

Details about delivering logs to Amazon CloudWatch Logs.

" + } + }, + "plugins": { + "target": "com.amazonaws.kafkaconnect#__listOfPluginDescription", + "traits": { + "smithy.api#documentation": "

Specifies which plugins were used for this connector.

" + } + }, + "serviceExecutionRoleArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon Web Services resources.

" + } + }, + "workerConfiguration": { + "target": "com.amazonaws.kafkaconnect#WorkerConfigurationDescription", + "traits": { + "smithy.api#documentation": "

Specifies which worker configuration was used for the connector.

" + } + } + } + }, + "com.amazonaws.kafkaconnect#DescribeCustomPlugin": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#DescribeCustomPluginRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#DescribeCustomPluginResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns information about a custom plugin.

", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/custom-plugins/{customPluginArn}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.kafkaconnect#DescribeCustomPluginRequest": { + "type": "structure", + "members": { + "customPluginArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the custom plugin that you want to describe.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.kafkaconnect#DescribeCustomPluginResponse": { + "type": "structure", + "members": { + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "

The time that the custom plugin was created.

" + } + }, + "customPluginArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the custom plugin.

" + } + }, + "customPluginState": { + "target": "com.amazonaws.kafkaconnect#CustomPluginState", + "traits": { + "smithy.api#documentation": "

The state of the custom plugin.

" + } + }, + "description": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The description of the custom plugin.

" + } + }, + "latestRevision": { + "target": "com.amazonaws.kafkaconnect#CustomPluginRevisionSummary", + "traits": { + "smithy.api#documentation": "

The latest successfully created revision of the custom plugin. If there are no successfully created revisions, this field will be absent.

" + } + }, + "name": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The name of the custom plugin.

" + } + } + } + }, + "com.amazonaws.kafkaconnect#DescribeWorkerConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#DescribeWorkerConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#DescribeWorkerConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns information about a worker configuration.

", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/worker-configurations/{workerConfigurationArn}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.kafkaconnect#DescribeWorkerConfigurationRequest": { + "type": "structure", + "members": { + "workerConfigurationArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the worker configuration that you want to get information about.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.kafkaconnect#DescribeWorkerConfigurationResponse": { + "type": "structure", + "members": { + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "

The time that the worker configuration was created.

" + } + }, + "description": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The description of the worker configuration.

" + } + }, + "latestRevision": { + "target": "com.amazonaws.kafkaconnect#WorkerConfigurationRevisionDescription", + "traits": { + "smithy.api#documentation": "

The latest revision of the worker configuration.

" + } + }, + "name": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The name of the worker configuration.

" + } + }, + "workerConfigurationArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the worker configuration.

" + } + } + } + }, + "com.amazonaws.kafkaconnect#FirehoseLogDelivery": { + "type": "structure", + "members": { + "deliveryStream": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery.

" + } + }, + "enabled": { + "target": "com.amazonaws.kafkaconnect#__boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether connector logs get delivered to Amazon Kinesis Data Firehose.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The settings for delivering logs to Amazon Kinesis Data Firehose.

" + } + }, + "com.amazonaws.kafkaconnect#FirehoseLogDeliveryDescription": { + "type": "structure", + "members": { + "deliveryStream": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery.

" + } + }, + "enabled": { + "target": "com.amazonaws.kafkaconnect#__boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether connector logs get delivered to Amazon Kinesis Data Firehose.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

A description of the settings for delivering logs to Amazon Kinesis Data Firehose.

" + } + }, + "com.amazonaws.kafkaconnect#ForbiddenException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "traits": { + "smithy.api#documentation": "

HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.

", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.kafkaconnect#InternalServerErrorException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "traits": { + "smithy.api#documentation": "

HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.

", + "smithy.api#error": "server", + "smithy.api#httpError": 500 + } + }, + "com.amazonaws.kafkaconnect#KafkaCluster": { + "type": "structure", + "members": { + "apacheKafkaCluster": { + "target": "com.amazonaws.kafkaconnect#ApacheKafkaCluster", + "traits": { + "smithy.api#documentation": "

The Apache Kafka cluster to which the connector is connected.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The details of the Apache Kafka cluster to which the connector is connected.

" + } + }, + "com.amazonaws.kafkaconnect#KafkaClusterClientAuthentication": { + "type": "structure", + "members": { + "authenticationType": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationType", + "traits": { + "smithy.api#documentation": "

The type of client authentication used to connect to the Apache Kafka cluster. Value NONE means that no client authentication is used.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The client authentication information used to authenticate with the Apache Kafka cluster.

" + } + }, + "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationDescription": { + "type": "structure", + "members": { + "authenticationType": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationType", + "traits": { + "smithy.api#documentation": "

The type of client authentication used to connect to the Apache Kafka cluster. Value NONE means that no client authentication is used.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The client authentication information used to authenticate with the Apache Kafka cluster.

" + } + }, + "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "NONE", + "name": "NONE" + }, + { + "value": "IAM", + "name": "IAM" + } + ] + } + }, + "com.amazonaws.kafkaconnect#KafkaClusterDescription": { + "type": "structure", + "members": { + "apacheKafkaCluster": { + "target": "com.amazonaws.kafkaconnect#ApacheKafkaClusterDescription", + "traits": { + "smithy.api#documentation": "

The Apache Kafka cluster to which the connector is connected.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details of how to connect to the Apache Kafka cluster.

" + } + }, + "com.amazonaws.kafkaconnect#KafkaClusterEncryptionInTransit": { + "type": "structure", + "members": { + "encryptionType": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterEncryptionInTransitType", + "traits": { + "smithy.api#documentation": "

The type of encryption in transit to the Apache Kafka cluster.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Details of encryption in transit to the Apache Kafka cluster.

" + } + }, + "com.amazonaws.kafkaconnect#KafkaClusterEncryptionInTransitDescription": { + "type": "structure", + "members": { + "encryptionType": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterEncryptionInTransitType", + "traits": { + "smithy.api#documentation": "

The type of encryption in transit to the Apache Kafka cluster.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The description of the encryption in transit to the Apache Kafka cluster.

" + } + }, + "com.amazonaws.kafkaconnect#KafkaClusterEncryptionInTransitType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PLAINTEXT", + "name": "PLAINTEXT" + }, + { + "value": "TLS", + "name": "TLS" + } + ] + } + }, + "com.amazonaws.kafkaconnect#KafkaConnect": { + "type": "service", + "version": "2021-09-14", + "operations": [ + { + "target": "com.amazonaws.kafkaconnect#CreateConnector" + }, + { + "target": "com.amazonaws.kafkaconnect#CreateCustomPlugin" + }, + { + "target": "com.amazonaws.kafkaconnect#CreateWorkerConfiguration" + }, + { + "target": "com.amazonaws.kafkaconnect#DeleteConnector" + }, + { + "target": "com.amazonaws.kafkaconnect#DescribeConnector" + }, + { + "target": "com.amazonaws.kafkaconnect#DescribeCustomPlugin" + }, + { + "target": "com.amazonaws.kafkaconnect#DescribeWorkerConfiguration" + }, + { + "target": "com.amazonaws.kafkaconnect#ListConnectors" + }, + { + "target": "com.amazonaws.kafkaconnect#ListCustomPlugins" + }, + { + "target": "com.amazonaws.kafkaconnect#ListWorkerConfigurations" + }, + { + "target": "com.amazonaws.kafkaconnect#UpdateConnector" + } + ], + "traits": { + "aws.api#service": { + "sdkId": "KafkaConnect", + "arnNamespace": "kafkaconnect", + "cloudFormationName": "KafkaConnect", + "cloudTrailEventSource": "kafkaconnect.amazonaws.com", + "endpointPrefix": "kafkaconnect" + }, + "aws.auth#sigv4": { + "name": "kafkaconnect" + }, + "aws.protocols#restJson1": {}, + "smithy.api#cors": { + "additionalAllowedHeaders": [ + "X-Api-Key" + ] + }, + "smithy.api#documentation": "

", + "smithy.api#title": "Managed Streaming for Kafka Connect" + } + }, + "com.amazonaws.kafkaconnect#ListConnectors": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#ListConnectorsRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#ListConnectorsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of all the connectors in this account and Region. The list is limited to connectors whose name starts with the specified prefix. The response also includes a description of each of the listed connectors.

", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/connectors", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "connectors", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.kafkaconnect#ListConnectorsRequest": { + "type": "structure", + "members": { + "connectorNamePrefix": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The name prefix that you want to use to search for and list connectors.

", + "smithy.api#httpQuery": "connectorNamePrefix" + } + }, + "maxResults": { + "target": "com.amazonaws.kafkaconnect#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of connectors to list in one response.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

If the response of a ListConnectors operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

", + "smithy.api#httpQuery": "nextToken" + } + } + } + }, + "com.amazonaws.kafkaconnect#ListConnectorsResponse": { + "type": "structure", + "members": { + "connectors": { + "target": "com.amazonaws.kafkaconnect#__listOfConnectorSummary", + "traits": { + "smithy.api#documentation": "

An array of connector descriptions.

" + } + }, + "nextToken": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

If the response of a ListConnectors operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

" + } + } + } + }, + "com.amazonaws.kafkaconnect#ListCustomPlugins": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#ListCustomPluginsRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#ListCustomPluginsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of all of the custom plugins in this account and Region.

", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/custom-plugins", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "customPlugins", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.kafkaconnect#ListCustomPluginsRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.kafkaconnect#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of custom plugins to list in one response.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

If the response of a ListCustomPlugins operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

", + "smithy.api#httpQuery": "nextToken" + } + } + } + }, + "com.amazonaws.kafkaconnect#ListCustomPluginsResponse": { + "type": "structure", + "members": { + "customPlugins": { + "target": "com.amazonaws.kafkaconnect#__listOfCustomPluginSummary", + "traits": { + "smithy.api#documentation": "

An array of custom plugin descriptions.

" + } + }, + "nextToken": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

If the response of a ListCustomPlugins operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

" + } + } + } + }, + "com.amazonaws.kafkaconnect#ListWorkerConfigurations": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#ListWorkerConfigurationsRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#ListWorkerConfigurationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns a list of all of the worker configurations in this account and Region.

", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/worker-configurations", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "workerConfigurations", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.kafkaconnect#ListWorkerConfigurationsRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.kafkaconnect#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of worker configurations to list in one response.

", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

If the response of a ListWorkerConfigurations operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

", + "smithy.api#httpQuery": "nextToken" + } + } + } + }, + "com.amazonaws.kafkaconnect#ListWorkerConfigurationsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

If the response of a ListWorkerConfigurations operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.

" + } + }, + "workerConfigurations": { + "target": "com.amazonaws.kafkaconnect#__listOfWorkerConfigurationSummary", + "traits": { + "smithy.api#documentation": "

An array of worker configuration descriptions.

" + } + } + } + }, + "com.amazonaws.kafkaconnect#LogDelivery": { + "type": "structure", + "members": { + "workerLogDelivery": { + "target": "com.amazonaws.kafkaconnect#WorkerLogDelivery", + "traits": { + "smithy.api#documentation": "

The workers can send worker logs to different destination types. This configuration specifies the details of these destinations.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about log delivery.

" + } + }, + "com.amazonaws.kafkaconnect#LogDeliveryDescription": { + "type": "structure", + "members": { + "workerLogDelivery": { + "target": "com.amazonaws.kafkaconnect#WorkerLogDeliveryDescription", + "traits": { + "smithy.api#documentation": "

The workers can send worker logs to different destination types. This configuration specifies the details of these destinations.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The description of the log delivery settings.

" + } + }, + "com.amazonaws.kafkaconnect#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.kafkaconnect#NotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "traits": { + "smithy.api#documentation": "

HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.

", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.kafkaconnect#Plugin": { + "type": "structure", + "members": { + "customPlugin": { + "target": "com.amazonaws.kafkaconnect#CustomPlugin", + "traits": { + "smithy.api#documentation": "

Details about a custom plugin.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A plugin is an Amazon Web Services resource that contains the code that defines your connector logic.

" + } + }, + "com.amazonaws.kafkaconnect#PluginDescription": { + "type": "structure", + "members": { + "customPlugin": { + "target": "com.amazonaws.kafkaconnect#CustomPluginDescription", + "traits": { + "smithy.api#documentation": "

Details about a custom plugin.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The description of the plugin.

" + } + }, + "com.amazonaws.kafkaconnect#ProvisionedCapacity": { + "type": "structure", + "members": { + "mcuCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max8", + "traits": { + "smithy.api#documentation": "

The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1, 2, 4, and 8.

", + "smithy.api#required": {} + } + }, + "workerCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "traits": { + "smithy.api#documentation": "

The number of workers that are allocated to the connector.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about a connector's provisioned capacity.

" + } + }, + "com.amazonaws.kafkaconnect#ProvisionedCapacityDescription": { + "type": "structure", + "members": { + "mcuCount": { + "target": "com.amazonaws.kafkaconnect#__integer", + "traits": { + "smithy.api#documentation": "

The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1, 2, 4, and 8.

" + } + }, + "workerCount": { + "target": "com.amazonaws.kafkaconnect#__integer", + "traits": { + "smithy.api#documentation": "

The number of workers that are allocated to the connector.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The description of a connector's provisioned capacity.

" + } + }, + "com.amazonaws.kafkaconnect#ProvisionedCapacityUpdate": { + "type": "structure", + "members": { + "mcuCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max8", + "traits": { + "smithy.api#documentation": "

The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1, 2, 4, and 8.

", + "smithy.api#required": {} + } + }, + "workerCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "traits": { + "smithy.api#documentation": "

The number of workers that are allocated to the connector.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An update to a connector's fixed capacity.

" + } + }, + "com.amazonaws.kafkaconnect#S3Location": { + "type": "structure", + "members": { + "bucketArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an S3 bucket.

", + "smithy.api#required": {} + } + }, + "fileKey": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The file key for an object in an S3 bucket.

", + "smithy.api#required": {} + } + }, + "objectVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The version of an object in an S3 bucket.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The location of an object in Amazon S3.

" + } + }, + "com.amazonaws.kafkaconnect#S3LocationDescription": { + "type": "structure", + "members": { + "bucketArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of an S3 bucket.

" + } + }, + "fileKey": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The file key for an object in an S3 bucket.

" + } + }, + "objectVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The version of an object in an S3 bucket.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The description of the location of an object in Amazon S3.

" + } + }, + "com.amazonaws.kafkaconnect#S3LogDelivery": { + "type": "structure", + "members": { + "bucket": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The name of the S3 bucket that is the destination for log delivery.

" + } + }, + "enabled": { + "target": "com.amazonaws.kafkaconnect#__boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether connector logs get sent to the specified Amazon S3 destination.

", + "smithy.api#required": {} + } + }, + "prefix": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The S3 prefix that is the destination for log delivery.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about delivering logs to Amazon S3.

" + } + }, + "com.amazonaws.kafkaconnect#S3LogDeliveryDescription": { + "type": "structure", + "members": { + "bucket": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The name of the S3 bucket that is the destination for log delivery.

" + } + }, + "enabled": { + "target": "com.amazonaws.kafkaconnect#__boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether connector logs get sent to the specified Amazon S3 destination.

" + } + }, + "prefix": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The S3 prefix that is the destination for log delivery.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The description of the details about delivering logs to Amazon S3.

" + } + }, + "com.amazonaws.kafkaconnect#ScaleInPolicy": { + "type": "structure", + "members": { + "cpuUtilizationPercentage": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max100", + "traits": { + "smithy.api#documentation": "

Specifies the CPU utilization percentage threshold at which you want connector scale-in to be triggered.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The scale-in policy for the connector.

" + } + }, + "com.amazonaws.kafkaconnect#ScaleInPolicyDescription": { + "type": "structure", + "members": { + "cpuUtilizationPercentage": { + "target": "com.amazonaws.kafkaconnect#__integer", + "traits": { + "smithy.api#documentation": "

Specifies the CPU utilization percentage threshold at which you want connector scale-in to be triggered.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The description of the scale-in policy for the connector.

" + } + }, + "com.amazonaws.kafkaconnect#ScaleInPolicyUpdate": { + "type": "structure", + "members": { + "cpuUtilizationPercentage": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max100", + "traits": { + "smithy.api#documentation": "

The target CPU utilization percentage threshold at which you want connector scale-in to be triggered.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An update to the connector's scale-in policy.

" + } + }, + "com.amazonaws.kafkaconnect#ScaleOutPolicy": { + "type": "structure", + "members": { + "cpuUtilizationPercentage": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max100", + "traits": { + "smithy.api#documentation": "

The CPU utilization percentage threshold at which you want connector scale-out to be triggered.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The scale-out policy for the connector.

" + } + }, + "com.amazonaws.kafkaconnect#ScaleOutPolicyDescription": { + "type": "structure", + "members": { + "cpuUtilizationPercentage": { + "target": "com.amazonaws.kafkaconnect#__integer", + "traits": { + "smithy.api#documentation": "

The CPU utilization percentage threshold at which you want connector scale-out to be triggered.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The description of the scale-out policy for the connector.

" + } + }, + "com.amazonaws.kafkaconnect#ScaleOutPolicyUpdate": { + "type": "structure", + "members": { + "cpuUtilizationPercentage": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max100", + "traits": { + "smithy.api#documentation": "

The target CPU utilization percentage threshold at which you want connector scale-out to be triggered.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An update to the connector's scale-out policy.

" + } + }, + "com.amazonaws.kafkaconnect#ServiceUnavailableException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "traits": { + "smithy.api#documentation": "

HTTP Status Code 503: Service Unavailable. Retrying your request after some time might resolve the issue.

", + "smithy.api#error": "server", + "smithy.api#httpError": 503 + } + }, + "com.amazonaws.kafkaconnect#TooManyRequestsException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "traits": { + "smithy.api#documentation": "

HTTP Status Code 429: Limit exceeded. The resource limit has been reached.

", + "smithy.api#error": "client", + "smithy.api#httpError": 429 + } + }, + "com.amazonaws.kafkaconnect#UnauthorizedException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "traits": { + "smithy.api#documentation": "

HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.

", + "smithy.api#error": "client", + "smithy.api#httpError": 401 + } + }, + "com.amazonaws.kafkaconnect#UpdateConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#UpdateConnectorRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#UpdateConnectorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the specified connector.

", + "smithy.api#http": { + "method": "PUT", + "uri": "/v1/connectors/{connectorArn}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.kafkaconnect#UpdateConnectorRequest": { + "type": "structure", + "members": { + "capacity": { + "target": "com.amazonaws.kafkaconnect#CapacityUpdate", + "traits": { + "smithy.api#documentation": "

The target capacity.

", + "smithy.api#required": {} + } + }, + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector that you want to update.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "currentVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The current version of the connector that you want to update.

", + "smithy.api#httpQuery": "currentVersion", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.kafkaconnect#UpdateConnectorResponse": { + "type": "structure", + "members": { + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the connector.

" + } + }, + "connectorState": { + "target": "com.amazonaws.kafkaconnect#ConnectorState", + "traits": { + "smithy.api#documentation": "

The state of the connector.

" + } + } + } + }, + "com.amazonaws.kafkaconnect#Vpc": { + "type": "structure", + "members": { + "securityGroups": { + "target": "com.amazonaws.kafkaconnect#__listOf__string", + "traits": { + "smithy.api#documentation": "

The security groups for the connector.

" + } + }, + "subnets": { + "target": "com.amazonaws.kafkaconnect#__listOf__string", + "traits": { + "smithy.api#documentation": "

The subnets for the connector.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about the VPC in which the connector resides.

" + } + }, + "com.amazonaws.kafkaconnect#VpcDescription": { + "type": "structure", + "members": { + "securityGroups": { + "target": "com.amazonaws.kafkaconnect#__listOf__string", + "traits": { + "smithy.api#documentation": "

The security groups for the connector.

" + } + }, + "subnets": { + "target": "com.amazonaws.kafkaconnect#__listOf__string", + "traits": { + "smithy.api#documentation": "

The subnets for the connector.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The description of the VPC in which the connector resides.

" + } + }, + "com.amazonaws.kafkaconnect#WorkerConfiguration": { + "type": "structure", + "members": { + "revision": { + "target": "com.amazonaws.kafkaconnect#__longMin1", + "traits": { + "smithy.api#documentation": "

The revision of the worker configuration.

", + "smithy.api#required": {} + } + }, + "workerConfigurationArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the worker configuration.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration of the workers, which are the processes that run the connector logic.

" + } + }, + "com.amazonaws.kafkaconnect#WorkerConfigurationDescription": { + "type": "structure", + "members": { + "revision": { + "target": "com.amazonaws.kafkaconnect#__long", + "traits": { + "smithy.api#documentation": "

The revision of the worker configuration.

" + } + }, + "workerConfigurationArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the worker configuration.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The description of the worker configuration.

" + } + }, + "com.amazonaws.kafkaconnect#WorkerConfigurationRevisionDescription": { + "type": "structure", + "members": { + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "

The time that the worker configuration was created.

" + } + }, + "description": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The description of the worker configuration revision.

" + } + }, + "propertiesFileContent": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

Base64-encoded contents of the connect-distributed.properties file.

" + } + }, + "revision": { + "target": "com.amazonaws.kafkaconnect#__long", + "traits": { + "smithy.api#documentation": "

The revision of the worker configuration.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The description of the worker configuration revision.

" + } + }, + "com.amazonaws.kafkaconnect#WorkerConfigurationRevisionSummary": { + "type": "structure", + "members": { + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "

The time that a worker configuration revision was created.

" + } + }, + "description": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The description of a worker configuration revision.

" + } + }, + "revision": { + "target": "com.amazonaws.kafkaconnect#__long", + "traits": { + "smithy.api#documentation": "

The revision of a worker configuration.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The summary of a worker configuration revision.

" + } + }, + "com.amazonaws.kafkaconnect#WorkerConfigurationSummary": { + "type": "structure", + "members": { + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "

The time that a worker configuration was created.

" + } + }, + "description": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The description of a worker configuration.

" + } + }, + "latestRevision": { + "target": "com.amazonaws.kafkaconnect#WorkerConfigurationRevisionSummary", + "traits": { + "smithy.api#documentation": "

The latest revision of a worker configuration.

" + } + }, + "name": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The name of the worker configuration.

" + } + }, + "workerConfigurationArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the worker configuration.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The summary of a worker configuration.

" + } + }, + "com.amazonaws.kafkaconnect#WorkerLogDelivery": { + "type": "structure", + "members": { + "cloudWatchLogs": { + "target": "com.amazonaws.kafkaconnect#CloudWatchLogsLogDelivery", + "traits": { + "smithy.api#documentation": "

Details about delivering logs to Amazon CloudWatch Logs.

" + } + }, + "firehose": { + "target": "com.amazonaws.kafkaconnect#FirehoseLogDelivery", + "traits": { + "smithy.api#documentation": "

Details about delivering logs to Amazon Kinesis Data Firehose.

" + } + }, + "s3": { + "target": "com.amazonaws.kafkaconnect#S3LogDelivery", + "traits": { + "smithy.api#documentation": "

Details about delivering logs to Amazon S3.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Workers can send worker logs to different destination types. This configuration specifies the details of these destinations.

" + } + }, + "com.amazonaws.kafkaconnect#WorkerLogDeliveryDescription": { + "type": "structure", + "members": { + "cloudWatchLogs": { + "target": "com.amazonaws.kafkaconnect#CloudWatchLogsLogDeliveryDescription", + "traits": { + "smithy.api#documentation": "

Details about delivering logs to Amazon CloudWatch Logs.

" + } + }, + "firehose": { + "target": "com.amazonaws.kafkaconnect#FirehoseLogDeliveryDescription", + "traits": { + "smithy.api#documentation": "

Details about delivering logs to Amazon Kinesis Data Firehose.

" + } + }, + "s3": { + "target": "com.amazonaws.kafkaconnect#S3LogDeliveryDescription", + "traits": { + "smithy.api#documentation": "

Details about delivering logs to Amazon S3.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Workers can send worker logs to different destination types. This configuration specifies the details of these destinations.

" + } + }, + "com.amazonaws.kafkaconnect#__boolean": { + "type": "boolean" + }, + "com.amazonaws.kafkaconnect#__integer": { + "type": "integer" + }, + "com.amazonaws.kafkaconnect#__integerMin1Max10": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.kafkaconnect#__integerMin1Max100": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.kafkaconnect#__integerMin1Max8": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 8 + } + } + }, + "com.amazonaws.kafkaconnect#__listOfConnectorSummary": { + "type": "list", + "member": { + "target": "com.amazonaws.kafkaconnect#ConnectorSummary" + } + }, + "com.amazonaws.kafkaconnect#__listOfCustomPluginSummary": { + "type": "list", + "member": { + "target": "com.amazonaws.kafkaconnect#CustomPluginSummary" + } + }, + "com.amazonaws.kafkaconnect#__listOfPlugin": { + "type": "list", + "member": { + "target": "com.amazonaws.kafkaconnect#Plugin" + } + }, + "com.amazonaws.kafkaconnect#__listOfPluginDescription": { + "type": "list", + "member": { + "target": "com.amazonaws.kafkaconnect#PluginDescription" + } + }, + "com.amazonaws.kafkaconnect#__listOfWorkerConfigurationSummary": { + "type": "list", + "member": { + "target": "com.amazonaws.kafkaconnect#WorkerConfigurationSummary" + } + }, + "com.amazonaws.kafkaconnect#__listOf__string": { + "type": "list", + "member": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "com.amazonaws.kafkaconnect#__long": { + "type": "long" + }, + "com.amazonaws.kafkaconnect#__longMin1": { + "type": "long", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 9223372036854775807 + } + } + }, + "com.amazonaws.kafkaconnect#__mapOf__string": { + "type": "map", + "key": { + "target": "com.amazonaws.kafkaconnect#__string" + }, + "value": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "com.amazonaws.kafkaconnect#__string": { + "type": "string" + }, + "com.amazonaws.kafkaconnect#__stringMax1024": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + } + } + }, + "com.amazonaws.kafkaconnect#__stringMin1Max128": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.kafkaconnect#__timestampIso8601": { + "type": "timestamp", + "traits": { + "smithy.api#timestampFormat": "date-time" + } + } + } +} \ No newline at end of file diff --git a/codegen/sdk-codegen/aws-models/macie2.2020-01-01.json b/codegen/sdk-codegen/aws-models/macie2.2020-01-01.json index 725e7764c8b..75e9bda29ec 100644 --- a/codegen/sdk-codegen/aws-models/macie2.2020-01-01.json +++ b/codegen/sdk-codegen/aws-models/macie2.2020-01-01.json @@ -447,7 +447,7 @@ "ids": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "

An array of strings that lists the unique identifiers for the custom data identifiers to retrieve information about.

", + "smithy.api#documentation": "

An array of custom data identifier IDs, one for each custom data identifier to retrieve information about.

", "smithy.api#jsonName": "ids" } } @@ -466,7 +466,7 @@ "notFoundIdentifierIds": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "

An array of identifiers, one for each identifier that was specified in the request, but doesn't correlate to an existing custom data identifier.

", + "smithy.api#documentation": "

An array of custom data identifier IDs, one for each custom data identifier that was specified in the request but doesn't correlate to an existing custom data identifier.

", "smithy.api#jsonName": "notFoundIdentifierIds" } } @@ -1255,7 +1255,7 @@ "customDataIdentifierIds": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "

The custom data identifiers to use for data analysis and classification.

", + "smithy.api#documentation": "

An array of unique identifiers, one for each custom data identifier for the job to use when it analyzes data. To use only managed data identifiers, don't specify a value for this property and specify a value other than NONE for the managedDataIdentifierSelector property.

", "smithy.api#jsonName": "customDataIdentifierIds" } }, @@ -1269,7 +1269,7 @@ "initialRun": { "target": "com.amazonaws.macie2#__boolean", "traits": { - "smithy.api#documentation": "

Specifies whether to analyze all existing, eligible objects immediately after the job is created.

", + "smithy.api#documentation": "

For a recurring job, specifies whether to analyze all existing, eligible objects immediately after the job is created (true). To analyze only those objects that are created or changed after you create the job and before the job's first scheduled run, set this value to false.

If you configure the job to run only once, don't specify a value for this property.

", "smithy.api#jsonName": "initialRun" } }, @@ -1281,6 +1281,20 @@ "smithy.api#required": {} } }, + "managedDataIdentifierIds": { + "target": "com.amazonaws.macie2#__listOf__string", + "traits": { + "smithy.api#documentation": "

An array of unique identifiers, one for each managed data identifier for the job to include (use) or exclude (not use) when it analyzes data. Inclusion or exclusion depends on the managed data identifier selection type that you specify for the job (managedDataIdentifierSelector).

To retrieve a list of valid values for this property, use the ListManagedDataIdentifiers operation.

", + "smithy.api#jsonName": "managedDataIdentifierIds" + } + }, + "managedDataIdentifierSelector": { + "target": "com.amazonaws.macie2#ManagedDataIdentifierSelector", + "traits": { + "smithy.api#documentation": "

The selection type to apply when determining which managed data identifiers the job uses to analyze data. Valid values are ALL, EXCLUDE, INCLUDE, and NONE.

If you don't specify a value for this property, the job uses all managed data identifiers. If you don't specify a value for this property or you specify ALL or EXCLUDE for a recurring job, the job also uses new managed data identifiers as they are released.

", + "smithy.api#jsonName": "managedDataIdentifierSelector" + } + }, "name": { "target": "com.amazonaws.macie2#__string", "traits": { @@ -1300,7 +1314,7 @@ "samplingPercentage": { "target": "com.amazonaws.macie2#__integer", "traits": { - "smithy.api#documentation": "

The sampling depth, as a percentage, to apply when processing objects. This value determines the percentage of eligible objects that the job analyzes. If this value is less than 100, Amazon Macie selects the objects to analyze at random, up to the specified percentage, and analyzes all the data in those objects.

", + "smithy.api#documentation": "

The sampling depth, as a percentage, for the job to apply when processing objects. This value determines the percentage of eligible objects that the job analyzes. If this value is less than 100, Amazon Macie selects the objects to analyze at random, up to the specified percentage, and analyzes all the data in those objects.

", "smithy.api#jsonName": "samplingPercentage" } }, @@ -1400,21 +1414,21 @@ "ignoreWords": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "

An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4-90 characters. Ignore words are case sensitive.

", + "smithy.api#documentation": "

An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4-90 UTF-8 characters. Ignore words are case sensitive.

", "smithy.api#jsonName": "ignoreWords" } }, "keywords": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3-90 characters. Keywords aren't case sensitive.

", + "smithy.api#documentation": "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3-90 UTF-8 characters. Keywords aren't case sensitive.

", "smithy.api#jsonName": "keywords" } }, "maximumMatchDistance": { "target": "com.amazonaws.macie2#__integer", "traits": { - "smithy.api#documentation": "

The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1-300 characters. The default value is 50.

", + "smithy.api#documentation": "

The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Amazon Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1-300 characters. The default value is 50.

", "smithy.api#jsonName": "maximumMatchDistance" } }, @@ -2138,7 +2152,7 @@ } }, "traits": { - "smithy.api#documentation": "

Provides information about a type of sensitive data that was detected by managed data identifiers and produced a sensitive data finding.

" + "smithy.api#documentation": "

Provides information about a type of sensitive data that was detected by a managed data identifier and produced a sensitive data finding.

" } }, "com.amazonaws.macie2#DefaultDetections": { @@ -2558,7 +2572,7 @@ "customDataIdentifierIds": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "

The custom data identifiers that the job uses to analyze data.

", + "smithy.api#documentation": "

An array of unique identifiers, one for each custom data identifier that the job uses to analyze data. This value is null if the job uses only managed data identifiers to analyze data.

", "smithy.api#jsonName": "customDataIdentifierIds" } }, @@ -2572,7 +2586,7 @@ "initialRun": { "target": "com.amazonaws.macie2#__boolean", "traits": { - "smithy.api#documentation": "

Specifies whether the job is configured to analyze all existing, eligible objects immediately after it's created.

", + "smithy.api#documentation": "

For a recurring job, specifies whether you configured the job to analyze all existing, eligible objects immediately after the job was created (true). If you configured the job to analyze only those objects that were created or changed after the job was created and before the job's first scheduled run, this value is false. This value is also false for a one-time job.

", "smithy.api#jsonName": "initialRun" } }, @@ -2593,7 +2607,7 @@ "jobStatus": { "target": "com.amazonaws.macie2#JobStatus", "traits": { - "smithy.api#documentation": "

The current status of the job. Possible values are:

", + "smithy.api#documentation": "

The current status of the job. Possible values are:

", "smithy.api#jsonName": "jobStatus" } }, @@ -2618,6 +2632,20 @@ "smithy.api#jsonName": "lastRunTime" } }, + "managedDataIdentifierIds": { + "target": "com.amazonaws.macie2#__listOf__string", + "traits": { + "smithy.api#documentation": "

An array of unique identifiers, one for each managed data identifier that the job is explicitly configured to include (use) or exclude (not use) when it analyzes data. Inclusion or exclusion depends on the managed data identifier selection type specified for the job (managedDataIdentifierSelector). This value is null if the job's managed data identifier selection type is ALL or the job uses only custom data identifiers (customDataIdentifierIds) to analyze data.

", + "smithy.api#jsonName": "managedDataIdentifierIds" + } + }, + "managedDataIdentifierSelector": { + "target": "com.amazonaws.macie2#ManagedDataIdentifierSelector", + "traits": { + "smithy.api#documentation": "

The selection type that determines which managed data identifiers the job uses to analyze data. Possible values are ALL, EXCLUDE, INCLUDE, and NONE.

If this value is null, the job uses all managed data identifiers. If this value is null, ALL, or EXCLUDE for a recurring job, the job also uses new managed data identifiers as they are released.

", + "smithy.api#jsonName": "managedDataIdentifierSelector" + } + }, "name": { "target": "com.amazonaws.macie2#__string", "traits": { @@ -2642,7 +2670,7 @@ "scheduleFrequency": { "target": "com.amazonaws.macie2#JobScheduleFrequency", "traits": { - "smithy.api#documentation": "

The recurrence pattern for running the job. If the job is configured to run only once, this value is null.

", + "smithy.api#documentation": "

The recurrence pattern for running the job. This value is null if the job is configured to run only once.

", "smithy.api#jsonName": "scheduleFrequency" } }, @@ -4004,7 +4032,7 @@ "maximumMatchDistance": { "target": "com.amazonaws.macie2#__integer", "traits": { - "smithy.api#documentation": "

The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern.

", + "smithy.api#documentation": "

The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Amazon Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern.

", "smithy.api#jsonName": "maximumMatchDistance" } }, @@ -5370,7 +5398,7 @@ "jobStatus": { "target": "com.amazonaws.macie2#JobStatus", "traits": { - "smithy.api#documentation": "

The current status of the job. Possible values are:

", + "smithy.api#documentation": "

The current status of the job. Possible values are:

", "smithy.api#jsonName": "jobStatus" } }, @@ -5888,7 +5916,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves information about all the Amazon Macie membership invitations that were received by an account.

", + "smithy.api#documentation": "

Retrieves information about the Amazon Macie membership invitations that were received by an account.

", "smithy.api#http": { "method": "GET", "uri": "/invitations", @@ -6061,6 +6089,54 @@ "smithy.api#documentation": "

Specifies criteria for sorting the results of a request for information about classification jobs.

" } }, + "com.amazonaws.macie2#ListManagedDataIdentifiers": { + "type": "operation", + "input": { + "target": "com.amazonaws.macie2#ListManagedDataIdentifiersRequest" + }, + "output": { + "target": "com.amazonaws.macie2#ListManagedDataIdentifiersResponse" + }, + "traits": { + "smithy.api#documentation": "

Retrieves information about all the managed data identifiers that Amazon Macie currently provides.

", + "smithy.api#http": { + "method": "POST", + "uri": "/managed-data-identifiers/list", + "code": 200 + } + } + }, + "com.amazonaws.macie2#ListManagedDataIdentifiersRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.macie2#__string", + "traits": { + "smithy.api#documentation": "

The nextToken string that specifies which page of results to return in a paginated response.

", + "smithy.api#jsonName": "nextToken" + } + } + } + }, + "com.amazonaws.macie2#ListManagedDataIdentifiersResponse": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.macie2#__listOfManagedDataIdentifierSummary", + "traits": { + "smithy.api#documentation": "

An array of objects, one for each managed data identifier.

", + "smithy.api#jsonName": "items" + } + }, + "nextToken": { + "target": "com.amazonaws.macie2#__string", + "traits": { + "smithy.api#documentation": "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

", + "smithy.api#jsonName": "nextToken" + } + } + } + }, "com.amazonaws.macie2#ListMembers": { "type": "operation", "input": { @@ -6408,6 +6484,9 @@ { "target": "com.amazonaws.macie2#ListInvitations" }, + { + "target": "com.amazonaws.macie2#ListManagedDataIdentifiers" + }, { "target": "com.amazonaws.macie2#ListMembers" }, @@ -6483,6 +6562,52 @@ ] } }, + "com.amazonaws.macie2#ManagedDataIdentifierSelector": { + "type": "string", + "traits": { + "smithy.api#documentation": "

The selection type that determines which managed data identifiers a classification job uses to analyze data. Valid values are ALL, EXCLUDE, INCLUDE, and NONE.

", + "smithy.api#enum": [ + { + "value": "ALL", + "name": "ALL" + }, + { + "value": "EXCLUDE", + "name": "EXCLUDE" + }, + { + "value": "INCLUDE", + "name": "INCLUDE" + }, + { + "value": "NONE", + "name": "NONE" + } + ] + } + }, + "com.amazonaws.macie2#ManagedDataIdentifierSummary": { + "type": "structure", + "members": { + "category": { + "target": "com.amazonaws.macie2#SensitiveDataItemCategory", + "traits": { + "smithy.api#documentation": "

The category of sensitive data that the managed data identifier detects: CREDENTIALS, for credentials data such as private keys or Amazon Web Services secret keys; FINANCIAL_INFORMATION, for financial data such as credit card numbers; or, PERSONAL_INFORMATION, for personal health information, such as health insurance identification numbers, or personally identifiable information, such as passport numbers.

", + "smithy.api#jsonName": "category" + } + }, + "id": { + "target": "com.amazonaws.macie2#__string", + "traits": { + "smithy.api#documentation": "

The unique identifier for the managed data identifier. This is a string that describes the type of sensitive data that the managed data identifier detects. For example: OPENSSH_PRIVATE_KEY for OpenSSH private keys, CREDIT_CARD_NUMBER for credit card numbers, or USA_PASSPORT_NUMBER for US passport numbers.

", + "smithy.api#jsonName": "id" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides information about a managed data identifier. For additional information, see Using managed data identifiers in the Amazon Macie User Guide.

" + } + }, "com.amazonaws.macie2#MatchingBucket": { "type": "structure", "members": { @@ -6790,7 +6915,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies the location of 1-15 occurrences of sensitive data that was detected by managed data identifiers or a custom data identifier and produced a sensitive data finding.

" + "smithy.api#documentation": "

Specifies the location of 1-15 occurrences of sensitive data that was detected by a managed data identifier or a custom data identifier and produced a sensitive data finding.

" } }, "com.amazonaws.macie2#OrderBy": { @@ -7217,7 +7342,7 @@ "owner": { "target": "com.amazonaws.macie2#S3BucketOwner", "traits": { - "smithy.api#documentation": "

The display name and Amazon Web Services account ID for the user who owns the bucket.

", + "smithy.api#documentation": "

The display name and canonical user ID for the Amazon Web Services account that owns the bucket.

", "smithy.api#jsonName": "owner" } }, @@ -7292,20 +7417,20 @@ "displayName": { "target": "com.amazonaws.macie2#__string", "traits": { - "smithy.api#documentation": "

The display name of the user who owns the bucket.

", + "smithy.api#documentation": "

The display name of the account that owns the bucket.

", "smithy.api#jsonName": "displayName" } }, "id": { "target": "com.amazonaws.macie2#__string", "traits": { - "smithy.api#documentation": "

The Amazon Web Services account ID for the user who owns the bucket.

", + "smithy.api#documentation": "

The canonical user ID for the account that owns the bucket.

", "smithy.api#jsonName": "id" } } }, "traits": { - "smithy.api#documentation": "

Provides information about the user who owns an S3 bucket.

" + "smithy.api#documentation": "

Provides information about the Amazon Web Services account that owns an S3 bucket.

" } }, "com.amazonaws.macie2#S3Destination": { @@ -7861,7 +7986,7 @@ "category": { "target": "com.amazonaws.macie2#SensitiveDataItemCategory", "traits": { - "smithy.api#documentation": "

The category of sensitive data that was detected. For example: CREDENTIALS, for credentials data such as private keys or Amazon Web Services secret keys; FINANCIAL_INFORMATION, for financial data such as credit card numbers; or, PERSONAL_INFORMATION, for personal health information, such as health insurance identification numbers, or personally identifiable information, such as driver's license identification numbers.

", + "smithy.api#documentation": "

The category of sensitive data that was detected. For example: CREDENTIALS, for credentials data such as private keys or Amazon Web Services secret keys; FINANCIAL_INFORMATION, for financial data such as credit card numbers; or, PERSONAL_INFORMATION, for personal health information, such as health insurance identification numbers, or personally identifiable information, such as passport numbers.

", "smithy.api#jsonName": "category" } }, @@ -7887,7 +8012,7 @@ "com.amazonaws.macie2#SensitiveDataItemCategory": { "type": "string", "traits": { - "smithy.api#documentation": "

The category of sensitive data that was detected and produced the finding. Possible values are:

", + "smithy.api#documentation": "

For a finding, the category of sensitive data that was detected and produced the finding. For a managed data identifier, the category of sensitive data that the managed data identifier detects. Possible values are:

", "smithy.api#enum": [ { "value": "FINANCIAL_INFORMATION", @@ -8504,21 +8629,21 @@ "ignoreWords": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "

An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4-90 characters. Ignore words are case sensitive.

", + "smithy.api#documentation": "

An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4-90 UTF-8 characters. Ignore words are case sensitive.

", "smithy.api#jsonName": "ignoreWords" } }, "keywords": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3-90 characters. Keywords aren't case sensitive.

", + "smithy.api#documentation": "

An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3-90 UTF-8 characters. Keywords aren't case sensitive.

", "smithy.api#jsonName": "keywords" } }, "maximumMatchDistance": { "target": "com.amazonaws.macie2#__integer", "traits": { - "smithy.api#documentation": "

The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1-300 characters. The default value is 50.

", + "smithy.api#documentation": "

The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Amazon Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1-300 characters. The default value is 50.

", "smithy.api#jsonName": "maximumMatchDistance" } }, @@ -9568,6 +9693,12 @@ "target": "com.amazonaws.macie2#ListJobsFilterTerm" } }, + "com.amazonaws.macie2#__listOfManagedDataIdentifierSummary": { + "type": "list", + "member": { + "target": "com.amazonaws.macie2#ManagedDataIdentifierSummary" + } + }, "com.amazonaws.macie2#__listOfMatchingResource": { "type": "list", "member": { diff --git a/codegen/sdk-codegen/aws-models/pinpoint.2016-12-01.json b/codegen/sdk-codegen/aws-models/pinpoint.2016-12-01.json index e03cc6c575c..8a3de2b31db 100644 --- a/codegen/sdk-codegen/aws-models/pinpoint.2016-12-01.json +++ b/codegen/sdk-codegen/aws-models/pinpoint.2016-12-01.json @@ -1185,6 +1185,25 @@ "smithy.api#documentation": "

Specifies address-based configuration settings for a message that's sent directly to an endpoint.

" } }, + "com.amazonaws.pinpoint#Alignment": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "LEFT", + "name": "LEFT" + }, + { + "value": "CENTER", + "name": "CENTER" + }, + { + "value": "RIGHT", + "name": "RIGHT" + } + ] + } + }, "com.amazonaws.pinpoint#AndroidPushNotificationTemplate": { "type": "structure", "members": { @@ -1708,6 +1727,25 @@ "smithy.api#documentation": "

Provides the results of a query that retrieved the data for a standard metric that applies to an application, campaign, or journey.

" } }, + "com.amazonaws.pinpoint#ButtonAction": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "LINK", + "name": "LINK" + }, + { + "value": "DEEP_LINK", + "name": "DEEP_LINK" + }, + { + "value": "CLOSE", + "name": "CLOSE" + } + ] + } + }, "com.amazonaws.pinpoint#CampaignCustomMessage": { "type": "structure", "members": { @@ -1858,6 +1896,38 @@ "smithy.api#documentation": "

Specifies settings for invoking an AWS Lambda function that customizes a segment for a campaign.

" } }, + "com.amazonaws.pinpoint#CampaignInAppMessage": { + "type": "structure", + "members": { + "Body": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The message body of the notification, the email body, or the text message.

" + } + }, + "Content": { + "target": "com.amazonaws.pinpoint#ListOfInAppMessageContent", + "traits": { + "smithy.api#documentation": "

In-app message content.

" + } + }, + "CustomConfig": { + "target": "com.amazonaws.pinpoint#MapOf__string", + "traits": { + "smithy.api#documentation": "

Custom configuration to be sent to the client.

" + } + }, + "Layout": { + "target": "com.amazonaws.pinpoint#Layout", + "traits": { + "smithy.api#documentation": "

In-app message layout.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

In-app message configuration.

" + } + }, "com.amazonaws.pinpoint#CampaignLimits": { "type": "structure", "members": { @@ -1884,6 +1954,12 @@ "traits": { "smithy.api#documentation": "

The maximum number of messages that a campaign can send to a single endpoint during the course of the campaign. If a campaign recurs, this setting applies to all runs of the campaign. The maximum value is 100.

" } + }, + "Session": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "

The maximum total number of messages that the campaign can send per user session.

" + } } }, "traits": { @@ -2044,6 +2120,12 @@ "traits": { "smithy.api#documentation": "

The version number of the campaign.

" } + }, + "Priority": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "

Defines the priority of the campaign, which is used to decide the order in which messages are displayed to the user if multiple messages are scheduled to be displayed at the same moment.

" + } } }, "traits": { @@ -2277,6 +2359,10 @@ { "value": "CUSTOM", "name": "CUSTOM" + }, + { + "value": "IN_APP", + "name": "IN_APP" } ] } @@ -2738,6 +2824,72 @@ } } }, + "com.amazonaws.pinpoint#CreateInAppTemplate": { + "type": "operation", + "input": { + "target": "com.amazonaws.pinpoint#CreateInAppTemplateRequest" + }, + "output": { + "target": "com.amazonaws.pinpoint#CreateInAppTemplateResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pinpoint#BadRequestException" + }, + { + "target": "com.amazonaws.pinpoint#ForbiddenException" + }, + { + "target": "com.amazonaws.pinpoint#InternalServerErrorException" + }, + { + "target": "com.amazonaws.pinpoint#MethodNotAllowedException" + }, + { + "target": "com.amazonaws.pinpoint#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a new message template for messages using the in-app message channel.

", + "smithy.api#http": { + "method": "POST", + "uri": "/v1/templates/{TemplateName}/inapp", + "code": 201 + } + } + }, + "com.amazonaws.pinpoint#CreateInAppTemplateRequest": { + "type": "structure", + "members": { + "InAppTemplateRequest": { + "target": "com.amazonaws.pinpoint#InAppTemplateRequest", + "traits": { + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } + }, + "TemplateName": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.pinpoint#CreateInAppTemplateResponse": { + "type": "structure", + "members": { + "TemplateCreateMessageBody": { + "target": "com.amazonaws.pinpoint#TemplateCreateMessageBody", + "traits": { + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } + } + } + }, "com.amazonaws.pinpoint#CreateJourney": { "type": "operation", "input": { @@ -3299,6 +3451,52 @@ "smithy.api#documentation": "

The settings for a custom message activity. This type of activity calls an AWS Lambda function or web hook that sends messages to participants.

" } }, + "com.amazonaws.pinpoint#DefaultButtonConfiguration": { + "type": "structure", + "members": { + "BackgroundColor": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The background color of the button.

" + } + }, + "BorderRadius": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "

The border radius of the button.

" + } + }, + "ButtonAction": { + "target": "com.amazonaws.pinpoint#ButtonAction", + "traits": { + "smithy.api#documentation": "

Action triggered by the button.

", + "smithy.api#required": {} + } + }, + "Link": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

Button destination.

" + } + }, + "Text": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

Button text.

", + "smithy.api#required": {} + } + }, + "TextColor": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The text color of the button.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Default button configuration.

" + } + }, "com.amazonaws.pinpoint#DefaultMessage": { "type": "structure", "members": { @@ -4275,6 +4473,78 @@ } } }, + "com.amazonaws.pinpoint#DeleteInAppTemplate": { + "type": "operation", + "input": { + "target": "com.amazonaws.pinpoint#DeleteInAppTemplateRequest" + }, + "output": { + "target": "com.amazonaws.pinpoint#DeleteInAppTemplateResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pinpoint#BadRequestException" + }, + { + "target": "com.amazonaws.pinpoint#ForbiddenException" + }, + { + "target": "com.amazonaws.pinpoint#InternalServerErrorException" + }, + { + "target": "com.amazonaws.pinpoint#MethodNotAllowedException" + }, + { + "target": "com.amazonaws.pinpoint#NotFoundException" + }, + { + "target": "com.amazonaws.pinpoint#PayloadTooLargeException" + }, + { + "target": "com.amazonaws.pinpoint#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a message template for messages sent using the in-app message channel.

", + "smithy.api#http": { + "method": "DELETE", + "uri": "/v1/templates/{TemplateName}/inapp", + "code": 202 + } + } + }, + "com.amazonaws.pinpoint#DeleteInAppTemplateRequest": { + "type": "structure", + "members": { + "TemplateName": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Version": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.

If specified, this value must match the identifier for an existing template version. If specified for an update operation, this value must match the identifier for the latest existing version of the template. This restriction helps ensure that race conditions don't occur.

If you don't specify a value for this parameter, Amazon Pinpoint does the following:

", + "smithy.api#httpQuery": "version" + } + } + } + }, + "com.amazonaws.pinpoint#DeleteInAppTemplateResponse": { + "type": "structure", + "members": { + "MessageBody": { + "target": "com.amazonaws.pinpoint#MessageBody", + "traits": { + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } + } + } + }, "com.amazonaws.pinpoint#DeleteJourney": { "type": "operation", "input": { @@ -6439,6 +6709,10 @@ { "value": "EVENT", "name": "EVENT" + }, + { + "value": "IN_APP_EVENT", + "name": "IN_APP_EVENT" } ] } @@ -8610,13 +8884,13 @@ } } }, - "com.amazonaws.pinpoint#GetJourney": { + "com.amazonaws.pinpoint#GetInAppMessages": { "type": "operation", "input": { - "target": "com.amazonaws.pinpoint#GetJourneyRequest" + "target": "com.amazonaws.pinpoint#GetInAppMessagesRequest" }, "output": { - "target": "com.amazonaws.pinpoint#GetJourneyResponse" + "target": "com.amazonaws.pinpoint#GetInAppMessagesResponse" }, "errors": [ { @@ -8642,21 +8916,54 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves information about the status, configuration, and other settings for a journey.

", + "smithy.api#documentation": "

Retrieves the in-app messages targeted for the provided endpoint ID.

", "smithy.api#http": { "method": "GET", - "uri": "/v1/apps/{ApplicationId}/journeys/{JourneyId}", + "uri": "/v1/apps/{ApplicationId}/endpoints/{EndpointId}/inappmessages", "code": 200 } } }, - "com.amazonaws.pinpoint#GetJourneyDateRangeKpi": { + "com.amazonaws.pinpoint#GetInAppMessagesRequest": { + "type": "structure", + "members": { + "ApplicationId": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "EndpointId": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The unique identifier for the endpoint.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.pinpoint#GetInAppMessagesResponse": { + "type": "structure", + "members": { + "InAppMessagesResponse": { + "target": "com.amazonaws.pinpoint#InAppMessagesResponse", + "traits": { + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.pinpoint#GetInAppTemplate": { "type": "operation", "input": { - "target": "com.amazonaws.pinpoint#GetJourneyDateRangeKpiRequest" + "target": "com.amazonaws.pinpoint#GetInAppTemplateRequest" }, "output": { - "target": "com.amazonaws.pinpoint#GetJourneyDateRangeKpiResponse" + "target": "com.amazonaws.pinpoint#GetInAppTemplateResponse" }, "errors": [ { @@ -8682,21 +8989,133 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves (queries) pre-aggregated data for a standard engagement metric that applies to a journey.

", + "smithy.api#documentation": "

Retrieves the content and settings of a message template for messages sent through the in-app channel.

", "smithy.api#http": { "method": "GET", - "uri": "/v1/apps/{ApplicationId}/journeys/{JourneyId}/kpis/daterange/{KpiName}", + "uri": "/v1/templates/{TemplateName}/inapp", "code": 200 } } }, - "com.amazonaws.pinpoint#GetJourneyDateRangeKpiRequest": { + "com.amazonaws.pinpoint#GetInAppTemplateRequest": { "type": "structure", "members": { - "ApplicationId": { + "TemplateName": { "target": "com.amazonaws.pinpoint#__string", "traits": { - "smithy.api#documentation": "

The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.

", + "smithy.api#documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Version": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.

If specified, this value must match the identifier for an existing template version. If specified for an update operation, this value must match the identifier for the latest existing version of the template. This restriction helps ensure that race conditions don't occur.

If you don't specify a value for this parameter, Amazon Pinpoint does the following:

", + "smithy.api#httpQuery": "version" + } + } + } + }, + "com.amazonaws.pinpoint#GetInAppTemplateResponse": { + "type": "structure", + "members": { + "InAppTemplateResponse": { + "target": "com.amazonaws.pinpoint#InAppTemplateResponse", + "traits": { + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.pinpoint#GetJourney": { + "type": "operation", + "input": { + "target": "com.amazonaws.pinpoint#GetJourneyRequest" + }, + "output": { + "target": "com.amazonaws.pinpoint#GetJourneyResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pinpoint#BadRequestException" + }, + { + "target": "com.amazonaws.pinpoint#ForbiddenException" + }, + { + "target": "com.amazonaws.pinpoint#InternalServerErrorException" + }, + { + "target": "com.amazonaws.pinpoint#MethodNotAllowedException" + }, + { + "target": "com.amazonaws.pinpoint#NotFoundException" + }, + { + "target": "com.amazonaws.pinpoint#PayloadTooLargeException" + }, + { + "target": "com.amazonaws.pinpoint#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves information about the status, configuration, and other settings for a journey.

", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/apps/{ApplicationId}/journeys/{JourneyId}", + "code": 200 + } + } + }, + "com.amazonaws.pinpoint#GetJourneyDateRangeKpi": { + "type": "operation", + "input": { + "target": "com.amazonaws.pinpoint#GetJourneyDateRangeKpiRequest" + }, + "output": { + "target": "com.amazonaws.pinpoint#GetJourneyDateRangeKpiResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pinpoint#BadRequestException" + }, + { + "target": "com.amazonaws.pinpoint#ForbiddenException" + }, + { + "target": "com.amazonaws.pinpoint#InternalServerErrorException" + }, + { + "target": "com.amazonaws.pinpoint#MethodNotAllowedException" + }, + { + "target": "com.amazonaws.pinpoint#NotFoundException" + }, + { + "target": "com.amazonaws.pinpoint#PayloadTooLargeException" + }, + { + "target": "com.amazonaws.pinpoint#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves (queries) pre-aggregated data for a standard engagement metric that applies to a journey.

", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/apps/{ApplicationId}/journeys/{JourneyId}/kpis/daterange/{KpiName}", + "code": 200 + } + } + }, + "com.amazonaws.pinpoint#GetJourneyDateRangeKpiRequest": { + "type": "structure", + "members": { + "ApplicationId": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -10159,120 +10578,494 @@ } }, "traits": { - "smithy.api#documentation": "

Provides information about the resource settings for a job that imports endpoint definitions from one or more files. The files can be stored in an Amazon Simple Storage Service (Amazon S3) bucket or uploaded directly from a computer by using the Amazon Pinpoint console.

" + "smithy.api#documentation": "

Provides information about the resource settings for a job that imports endpoint definitions from one or more files. The files can be stored in an Amazon Simple Storage Service (Amazon S3) bucket or uploaded directly from a computer by using the Amazon Pinpoint console.

" + } + }, + "com.amazonaws.pinpoint#ImportJobResponse": { + "type": "structure", + "members": { + "ApplicationId": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The unique identifier for the application that's associated with the import job.

", + "smithy.api#required": {} + } + }, + "CompletedPieces": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "

The number of pieces that were processed successfully (completed) by the import job, as of the time of the request.

" + } + }, + "CompletionDate": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The date, in ISO 8601 format, when the import job was completed.

" + } + }, + "CreationDate": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The date, in ISO 8601 format, when the import job was created.

", + "smithy.api#required": {} + } + }, + "Definition": { + "target": "com.amazonaws.pinpoint#ImportJobResource", + "traits": { + "smithy.api#documentation": "

The resource settings that apply to the import job.

", + "smithy.api#required": {} + } + }, + "FailedPieces": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "

The number of pieces that weren't processed successfully (failed) by the import job, as of the time of the request.

" + } + }, + "Failures": { + "target": "com.amazonaws.pinpoint#ListOf__string", + "traits": { + "smithy.api#documentation": "

An array of entries, one for each of the first 100 entries that weren't processed successfully (failed) by the import job, if any.

" + } + }, + "Id": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The unique identifier for the import job.

", + "smithy.api#required": {} + } + }, + "JobStatus": { + "target": "com.amazonaws.pinpoint#JobStatus", + "traits": { + "smithy.api#documentation": "

The status of the import job. The job status is FAILED if Amazon Pinpoint wasn't able to process one or more pieces in the job.

", + "smithy.api#required": {} + } + }, + "TotalFailures": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "

The total number of endpoint definitions that weren't processed successfully (failed) by the import job, typically because an error, such as a syntax error, occurred.

" + } + }, + "TotalPieces": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "

The total number of pieces that must be processed to complete the import job. Each piece consists of an approximately equal portion of the endpoint definitions that are part of the import job.

" + } + }, + "TotalProcessed": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "

The total number of endpoint definitions that were processed by the import job.

" + } + }, + "Type": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The job type. This value is IMPORT for import jobs.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides information about the status and settings of a job that imports endpoint definitions from one or more files. The files can be stored in an Amazon Simple Storage Service (Amazon S3) bucket or uploaded directly from a computer by using the Amazon Pinpoint console.

" + } + }, + "com.amazonaws.pinpoint#ImportJobsResponse": { + "type": "structure", + "members": { + "Item": { + "target": "com.amazonaws.pinpoint#ListOfImportJobResponse", + "traits": { + "smithy.api#documentation": "

An array of responses, one for each import job that's associated with the application (Import Jobs resource) or segment (Segment Import Jobs resource).

", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides information about the status and settings of all the import jobs that are associated with an application or segment. An import job is a job that imports endpoint definitions from one or more files.

" + } + }, + "com.amazonaws.pinpoint#InAppCampaignSchedule": { + "type": "structure", + "members": { + "EndDate": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The scheduled time after which the in-app message should not be shown. The timestamp is in ISO 8601 format.

" + } + }, + "EventFilter": { + "target": "com.amazonaws.pinpoint#CampaignEventFilter", + "traits": { + "smithy.api#documentation": "

The event filter that the SDK uses to determine when to show the in-app message in the application.

" + } + }, + "QuietTime": { + "target": "com.amazonaws.pinpoint#QuietTime", + "traits": { + "smithy.api#documentation": "

Time during which the in-app message should not be shown to the user.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Schedule of the campaign.

" + } + }, + "com.amazonaws.pinpoint#InAppMessage": { + "type": "structure", + "members": { + "Content": { + "target": "com.amazonaws.pinpoint#ListOfInAppMessageContent", + "traits": { + "smithy.api#documentation": "

In-app message content.

" + } + }, + "CustomConfig": { + "target": "com.amazonaws.pinpoint#MapOf__string", + "traits": { + "smithy.api#documentation": "

Custom configuration to be sent to the SDK.

" + } + }, + "Layout": { + "target": "com.amazonaws.pinpoint#Layout", + "traits": { + "smithy.api#documentation": "

The layout of the message.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides all fields required for building an in-app message.

" + } + }, + "com.amazonaws.pinpoint#InAppMessageBodyConfig": { + "type": "structure", + "members": { + "Alignment": { + "target": "com.amazonaws.pinpoint#Alignment", + "traits": { + "smithy.api#documentation": "

The alignment of the text. Valid values: LEFT, CENTER, RIGHT.

", + "smithy.api#required": {} + } + }, + "Body": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

Message Body.

", + "smithy.api#required": {} + } + }, + "TextColor": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The text color.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The text configuration for the message body.

" + } + }, + "com.amazonaws.pinpoint#InAppMessageButton": { + "type": "structure", + "members": { + "Android": { + "target": "com.amazonaws.pinpoint#OverrideButtonConfiguration", + "traits": { + "smithy.api#documentation": "

The button configuration to use on Android devices. This overrides the default button configuration.

" + } + }, + "DefaultConfig": { + "target": "com.amazonaws.pinpoint#DefaultButtonConfiguration", + "traits": { + "smithy.api#documentation": "

Default button content.

" + } + }, + "IOS": { + "target": "com.amazonaws.pinpoint#OverrideButtonConfiguration", + "traits": { + "smithy.api#documentation": "

The button configuration to use on iOS devices. This overrides the default button configuration.

" + } + }, + "Web": { + "target": "com.amazonaws.pinpoint#OverrideButtonConfiguration", + "traits": { + "smithy.api#documentation": "

The button configuration to use on web platforms. This overrides the default button configuration.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Button Config for an in-app message.

" + } + }, + "com.amazonaws.pinpoint#InAppMessageCampaign": { + "type": "structure", + "members": { + "CampaignId": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The unique identifier of the corresponding campaign.

" + } + }, + "DailyCap": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "

The daily cap, which controls the number of times any in-app message can be shown to the endpoint during a day.

" + } + }, + "InAppMessage": { + "target": "com.amazonaws.pinpoint#InAppMessage", + "traits": { + "smithy.api#documentation": "

In-app message content with all fields required for rendering an in-app message.

" + } + }, + "Priority": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "

Priority of the in-app message.

" + } + }, + "Schedule": { + "target": "com.amazonaws.pinpoint#InAppCampaignSchedule", + "traits": { + "smithy.api#documentation": "

Schedule of the campaign.

" + } + }, + "SessionCap": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "

The session cap, which controls the number of times an in-app message can be shown to the endpoint during an application session.

" + } + }, + "TotalCap": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "

The total cap, which controls the number of times an in-app message can be shown to the endpoint.

" + } + }, + "TreatmentId": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The treatment ID of the campaign.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Targeted in-app message campaign.

" + } + }, + "com.amazonaws.pinpoint#InAppMessageContent": { + "type": "structure", + "members": { + "BackgroundColor": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The background color for the message.

" + } + }, + "BodyConfig": { + "target": "com.amazonaws.pinpoint#InAppMessageBodyConfig", + "traits": { + "smithy.api#documentation": "

The configuration for the message body.

" + } + }, + "HeaderConfig": { + "target": "com.amazonaws.pinpoint#InAppMessageHeaderConfig", + "traits": { + "smithy.api#documentation": "

The configuration for the message header.

" + } + }, + "ImageUrl": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The URL of the image to use as the background of the message.

" + } + }, + "PrimaryBtn": { + "target": "com.amazonaws.pinpoint#InAppMessageButton", + "traits": { + "smithy.api#documentation": "

The first button inside the message.

" + } + }, + "SecondaryBtn": { + "target": "com.amazonaws.pinpoint#InAppMessageButton", + "traits": { + "smithy.api#documentation": "

The second button inside the message.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration for the message content.
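As an illustration, a sketch of building one modal of in-app content with the AWS SDK for Go v2, assuming the generated type and enum constant names for these new shapes follow the SDK's usual conventions (for example, types.AlignmentCenter for the CENTER value of the Alignment enum):

```go
package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/pinpoint/types"
)

// buildInAppContent builds one modal of in-app message content. The colors,
// text, and image URL are placeholders; the enum constant names are assumed.
func buildInAppContent() types.InAppMessageContent {
	return types.InAppMessageContent{
		BackgroundColor: aws.String("#FFFFFF"),
		// ImageUrl and buttons are optional.
		ImageUrl: aws.String("https://example.com/banner.png"),
		HeaderConfig: &types.InAppMessageHeaderConfig{
			Alignment: types.AlignmentCenter,
			Header:    aws.String("Welcome back"),
			TextColor: aws.String("#000000"),
		},
		BodyConfig: &types.InAppMessageBodyConfig{
			Alignment: types.AlignmentLeft,
			Body:      aws.String("Check out what's new in the app."),
			TextColor: aws.String("#202020"),
		},
	}
}
```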

" + } + }, + "com.amazonaws.pinpoint#InAppMessageHeaderConfig": { + "type": "structure", + "members": { + "Alignment": { + "target": "com.amazonaws.pinpoint#Alignment", + "traits": { + "smithy.api#documentation": "

The alignment of the text. Valid values: LEFT, CENTER, RIGHT.

", + "smithy.api#required": {} + } + }, + "Header": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

Message Header.

", + "smithy.api#required": {} + } + }, + "TextColor": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The text color.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The text configuration for the message header.

" + } + }, + "com.amazonaws.pinpoint#InAppMessagesResponse": { + "type": "structure", + "members": { + "InAppMessageCampaigns": { + "target": "com.amazonaws.pinpoint#ListOfInAppMessageCampaign", + "traits": { + "smithy.api#documentation": "

List of targeted in-app message campaigns.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Get in-app messages response object.

" } }, - "com.amazonaws.pinpoint#ImportJobResponse": { + "com.amazonaws.pinpoint#InAppTemplateRequest": { "type": "structure", "members": { - "ApplicationId": { - "target": "com.amazonaws.pinpoint#__string", + "Content": { + "target": "com.amazonaws.pinpoint#ListOfInAppMessageContent", "traits": { - "smithy.api#documentation": "

The unique identifier for the application that's associated with the import job.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The content of the message, which can include up to 5 modals. Each modal must contain a message, a header, and a background color. ImageUrl and buttons are optional.

" } }, - "CompletedPieces": { - "target": "com.amazonaws.pinpoint#__integer", + "CustomConfig": { + "target": "com.amazonaws.pinpoint#MapOf__string", "traits": { - "smithy.api#documentation": "

The number of pieces that were processed successfully (completed) by the import job, as of the time of the request.

" + "smithy.api#documentation": "

Custom configuration to be sent to the client.

" } }, - "CompletionDate": { - "target": "com.amazonaws.pinpoint#__string", + "Layout": { + "target": "com.amazonaws.pinpoint#Layout", "traits": { - "smithy.api#documentation": "

The date, in ISO 8601 format, when the import job was completed.

" + "smithy.api#documentation": "

The layout of the message.

" } }, - "CreationDate": { - "target": "com.amazonaws.pinpoint#__string", + "tags": { + "target": "com.amazonaws.pinpoint#MapOf__string", "traits": { - "smithy.api#documentation": "

The date, in ISO 8601 format, when the import job was created.

", - "smithy.api#required": {} + "smithy.api#documentation": "

A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

", + "smithy.api#jsonName": "tags" } }, - "Definition": { - "target": "com.amazonaws.pinpoint#ImportJobResource", + "TemplateDescription": { + "target": "com.amazonaws.pinpoint#__string", "traits": { - "smithy.api#documentation": "

The resource settings that apply to the import job.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The description of the template.

" } - }, - "FailedPieces": { - "target": "com.amazonaws.pinpoint#__integer", + } + }, + "traits": { + "smithy.api#documentation": "

InApp Template Request.

" + } + }, + "com.amazonaws.pinpoint#InAppTemplateResponse": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.pinpoint#__string", "traits": { - "smithy.api#documentation": "

The number of pieces that weren't processed successfully (failed) by the import job, as of the time of the request.

" + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the template.

" } }, - "Failures": { - "target": "com.amazonaws.pinpoint#ListOf__string", + "Content": { + "target": "com.amazonaws.pinpoint#ListOfInAppMessageContent", "traits": { - "smithy.api#documentation": "

An array of entries, one for each of the first 100 entries that weren't processed successfully (failed) by the import job, if any.

" + "smithy.api#documentation": "

The content of the message, which can include up to 5 modals. Each modal must contain a message, a header, and a background color. ImageUrl and buttons are optional.

" } }, - "Id": { + "CreationDate": { "target": "com.amazonaws.pinpoint#__string", "traits": { - "smithy.api#documentation": "

The unique identifier for the import job.

", + "smithy.api#documentation": "

The creation date of the template.

", "smithy.api#required": {} } }, - "JobStatus": { - "target": "com.amazonaws.pinpoint#JobStatus", + "CustomConfig": { + "target": "com.amazonaws.pinpoint#MapOf__string", "traits": { - "smithy.api#documentation": "

The status of the import job. The job status is FAILED if Amazon Pinpoint wasn't able to process one or more pieces in the job.

", + "smithy.api#documentation": "

Custom configuration to be sent to the client.

" + } + }, + "LastModifiedDate": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The last modified date of the template.

", "smithy.api#required": {} } }, - "TotalFailures": { - "target": "com.amazonaws.pinpoint#__integer", + "Layout": { + "target": "com.amazonaws.pinpoint#Layout", "traits": { - "smithy.api#documentation": "

The total number of endpoint definitions that weren't processed successfully (failed) by the import job, typically because an error, such as a syntax error, occurred.

" + "smithy.api#documentation": "

The layout of the message.

" } }, - "TotalPieces": { - "target": "com.amazonaws.pinpoint#__integer", + "tags": { + "target": "com.amazonaws.pinpoint#MapOf__string", "traits": { - "smithy.api#documentation": "

The total number of pieces that must be processed to complete the import job. Each piece consists of an approximately equal portion of the endpoint definitions that are part of the import job.

" + "smithy.api#documentation": "

A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.

", + "smithy.api#jsonName": "tags" } }, - "TotalProcessed": { - "target": "com.amazonaws.pinpoint#__integer", + "TemplateDescription": { + "target": "com.amazonaws.pinpoint#__string", "traits": { - "smithy.api#documentation": "

The total number of endpoint definitions that were processed by the import job.

" + "smithy.api#documentation": "

The description of the template.

" } }, - "Type": { + "TemplateName": { "target": "com.amazonaws.pinpoint#__string", "traits": { - "smithy.api#documentation": "

The job type. This value is IMPORT for import jobs.

", + "smithy.api#documentation": "

The name of the template.

", "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#documentation": "

Provides information about the status and settings of a job that imports endpoint definitions from one or more files. The files can be stored in an Amazon Simple Storage Service (Amazon S3) bucket or uploaded directly from a computer by using the Amazon Pinpoint console.

" - } - }, - "com.amazonaws.pinpoint#ImportJobsResponse": { - "type": "structure", - "members": { - "Item": { - "target": "com.amazonaws.pinpoint#ListOfImportJobResponse", + }, + "TemplateType": { + "target": "com.amazonaws.pinpoint#TemplateType", "traits": { - "smithy.api#documentation": "

An array of responses, one for each import job that's associated with the application (Import Jobs resource) or segment (Segment Import Jobs resource).

", + "smithy.api#documentation": "

The type of the template.

", "smithy.api#required": {} } }, - "NextToken": { + "Version": { "target": "com.amazonaws.pinpoint#__string", "traits": { - "smithy.api#documentation": "

The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.

" + "smithy.api#documentation": "

The version id of the template.

" } } }, "traits": { - "smithy.api#documentation": "

Provides information about the status and settings of all the import jobs that are associated with an application or segment. An import job is a job that imports endpoint definitions from one or more files.

" + "smithy.api#documentation": "

In-App Template Response.

" } }, "com.amazonaws.pinpoint#Include": { @@ -10808,6 +11601,37 @@ "smithy.api#documentation": "

Provides information about the status, configuration, and other settings for all the journeys that are associated with an application.

" } }, + "com.amazonaws.pinpoint#Layout": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "BOTTOM_BANNER", + "name": "BOTTOM_BANNER" + }, + { + "value": "TOP_BANNER", + "name": "TOP_BANNER" + }, + { + "value": "OVERLAYS", + "name": "OVERLAYS" + }, + { + "value": "MOBILE_FEED", + "name": "MOBILE_FEED" + }, + { + "value": "MIDDLE_BANNER", + "name": "MIDDLE_BANNER" + }, + { + "value": "CAROUSEL", + "name": "CAROUSEL" + } + ] + } + }, "com.amazonaws.pinpoint#ListJourneys": { "type": "operation", "input": { @@ -10929,6 +11753,18 @@ "target": "com.amazonaws.pinpoint#ImportJobResponse" } }, + "com.amazonaws.pinpoint#ListOfInAppMessageCampaign": { + "type": "list", + "member": { + "target": "com.amazonaws.pinpoint#InAppMessageCampaign" + } + }, + "com.amazonaws.pinpoint#ListOfInAppMessageContent": { + "type": "list", + "member": { + "target": "com.amazonaws.pinpoint#InAppMessageContent" + } + }, "com.amazonaws.pinpoint#ListOfJourneyResponse": { "type": "list", "member": { @@ -11564,6 +12400,12 @@ "traits": { "smithy.api#documentation": "

The message that the campaign sends through the SMS channel. If specified, this message overrides the default message.

" } + }, + "InAppMessage": { + "target": "com.amazonaws.pinpoint#CampaignInAppMessage", + "traits": { + "smithy.api#documentation": "

The in-app message configuration.

" + } } }, "traits": { @@ -11957,6 +12799,27 @@ ] } }, + "com.amazonaws.pinpoint#OverrideButtonConfiguration": { + "type": "structure", + "members": { + "ButtonAction": { + "target": "com.amazonaws.pinpoint#ButtonAction", + "traits": { + "smithy.api#documentation": "

Action triggered by the button.

", + "smithy.api#required": {} + } + }, + "Link": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The destination link for the button.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Override button configuration.

" + } + }, "com.amazonaws.pinpoint#PayloadTooLargeException": { "type": "structure", "members": { @@ -12062,6 +12925,9 @@ { "target": "com.amazonaws.pinpoint#CreateImportJob" }, + { + "target": "com.amazonaws.pinpoint#CreateInAppTemplate" + }, { "target": "com.amazonaws.pinpoint#CreateJourney" }, @@ -12119,6 +12985,9 @@ { "target": "com.amazonaws.pinpoint#DeleteGcmChannel" }, + { + "target": "com.amazonaws.pinpoint#DeleteInAppTemplate" + }, { "target": "com.amazonaws.pinpoint#DeleteJourney" }, @@ -12224,6 +13093,12 @@ { "target": "com.amazonaws.pinpoint#GetImportJobs" }, + { + "target": "com.amazonaws.pinpoint#GetInAppMessages" + }, + { + "target": "com.amazonaws.pinpoint#GetInAppTemplate" + }, { "target": "com.amazonaws.pinpoint#GetJourney" }, @@ -12353,6 +13228,9 @@ { "target": "com.amazonaws.pinpoint#UpdateGcmChannel" }, + { + "target": "com.amazonaws.pinpoint#UpdateInAppTemplate" + }, { "target": "com.amazonaws.pinpoint#UpdateJourney" }, @@ -14428,6 +15306,32 @@ "smithy.api#documentation": "

Specifies the message template to use for the message, for each type of channel.

" } }, + "com.amazonaws.pinpoint#TemplateCreateMessageBody": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the message template that was created.

" + } + }, + "Message": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The message that's returned from the API for the request to create the message template.

" + } + }, + "RequestID": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The unique identifier for the request to create the message template.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Provides information about a request to create a message template.

" + } + }, "com.amazonaws.pinpoint#TemplateResponse": { "type": "structure", "members": { @@ -14514,6 +15418,10 @@ { "value": "PUSH", "name": "PUSH" + }, + { + "value": "INAPP", + "name": "INAPP" } ] } @@ -15746,6 +16654,92 @@ } } }, + "com.amazonaws.pinpoint#UpdateInAppTemplate": { + "type": "operation", + "input": { + "target": "com.amazonaws.pinpoint#UpdateInAppTemplateRequest" + }, + "output": { + "target": "com.amazonaws.pinpoint#UpdateInAppTemplateResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pinpoint#BadRequestException" + }, + { + "target": "com.amazonaws.pinpoint#ForbiddenException" + }, + { + "target": "com.amazonaws.pinpoint#InternalServerErrorException" + }, + { + "target": "com.amazonaws.pinpoint#MethodNotAllowedException" + }, + { + "target": "com.amazonaws.pinpoint#NotFoundException" + }, + { + "target": "com.amazonaws.pinpoint#PayloadTooLargeException" + }, + { + "target": "com.amazonaws.pinpoint#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates an existing message template for messages sent through the in-app message channel.
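A hypothetical AWS SDK for Go v2 call for this new operation, assuming the generated client method, input type, and Layout constant names follow the SDK's usual conventions (for example, types.LayoutTopBanner for the TOP_BANNER value):

```go
package example

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/pinpoint"
	"github.com/aws/aws-sdk-go-v2/service/pinpoint/types"
)

// updateInAppTemplate overwrites the latest version of an in-app template.
// The template name, description, and layout constant are placeholders/assumptions.
func updateInAppTemplate(ctx context.Context) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := pinpoint.NewFromConfig(cfg)

	_, err = client.UpdateInAppTemplate(ctx, &pinpoint.UpdateInAppTemplateInput{
		TemplateName: aws.String("my-inapp-template"),
		// false (the default) overwrites the latest existing version
		// instead of creating a new one.
		CreateNewVersion: aws.Bool(false),
		InAppTemplateRequest: &types.InAppTemplateRequest{
			// LayoutTopBanner is the assumed constant for the TOP_BANNER value.
			Layout:              types.LayoutTopBanner,
			TemplateDescription: aws.String("Updated banner template"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```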

", + "smithy.api#http": { + "method": "PUT", + "uri": "/v1/templates/{TemplateName}/inapp", + "code": 202 + } + } + }, + "com.amazonaws.pinpoint#UpdateInAppTemplateRequest": { + "type": "structure", + "members": { + "CreateNewVersion": { + "target": "com.amazonaws.pinpoint#__boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether to save the updates as a new version of the message template. Valid values are: true, save the updates as a new version; and, false, save the updates to (overwrite) the latest existing version of the template.

If you don't specify a value for this parameter, Amazon Pinpoint saves the updates to (overwrites) the latest existing version of the template. If you specify a value of true for this parameter, don't specify a value for the version parameter. Otherwise, an error will occur.

", + "smithy.api#httpQuery": "create-new-version" + } + }, + "InAppTemplateRequest": { + "target": "com.amazonaws.pinpoint#InAppTemplateRequest", + "traits": { + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } + }, + "TemplateName": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.

", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Version": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "

The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.

If specified, this value must match the identifier for an existing template version. If specified for an update operation, this value must match the identifier for the latest existing version of the template. This restriction helps ensure that race conditions don't occur.

If you don't specify a value for this parameter, Amazon Pinpoint does the following:

", + "smithy.api#httpQuery": "version" + } + } + } + }, + "com.amazonaws.pinpoint#UpdateInAppTemplateResponse": { + "type": "structure", + "members": { + "MessageBody": { + "target": "com.amazonaws.pinpoint#MessageBody", + "traits": { + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } + } + } + }, "com.amazonaws.pinpoint#UpdateJourney": { "type": "operation", "input": { @@ -17033,6 +18027,12 @@ "traits": { "smithy.api#documentation": "

A custom name of the default treatment for the campaign, if the campaign has multiple treatments. A treatment is a variation of a campaign that's used for A/B testing.

" } + }, + "Priority": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "

Defines the priority of the campaign, which is used to determine the order in which messages are displayed to a user when multiple messages are scheduled to be displayed at the same moment.

" + } } }, "traits": { @@ -17289,6 +18289,10 @@ { "value": "CUSTOM", "name": "CUSTOM" + }, + { + "value": "IN_APP", + "name": "IN_APP" } ] } diff --git a/codegen/sdk-codegen/aws-models/quicksight.2018-04-01.json b/codegen/sdk-codegen/aws-models/quicksight.2018-04-01.json index 7e5f25fef86..d3f90542ecb 100644 --- a/codegen/sdk-codegen/aws-models/quicksight.2018-04-01.json +++ b/codegen/sdk-codegen/aws-models/quicksight.2018-04-01.json @@ -183,6 +183,17 @@ "smithy.api#documentation": "

The parameters for Elasticsearch.

" } }, + "com.amazonaws.quicksight#AmazonOpenSearchParameters": { + "type": "structure", + "members": { + "Domain": { + "target": "com.amazonaws.quicksight#Domain", + "traits": { + "smithy.api#required": {} + } + } + } + }, "com.amazonaws.quicksight#Analysis": { "type": "structure", "members": { @@ -4405,6 +4416,9 @@ "traits": { "smithy.api#documentation": "

The parameters for Twitter.

" } + }, + "AmazonOpenSearchParameters": { + "target": "com.amazonaws.quicksight#AmazonOpenSearchParameters" } }, "traits": { @@ -4518,6 +4532,10 @@ { "value": "TIMESTREAM", "name": "TIMESTREAM" + }, + { + "value": "AMAZON_OPENSEARCH", + "name": "AMAZON_OPENSEARCH" } ] } diff --git a/codegen/sdk-codegen/aws-models/rds.2014-10-31.json b/codegen/sdk-codegen/aws-models/rds.2014-10-31.json index 177512865f3..59700556a48 100644 --- a/codegen/sdk-codegen/aws-models/rds.2014-10-31.json +++ b/codegen/sdk-codegen/aws-models/rds.2014-10-31.json @@ -13160,7 +13160,7 @@ } ], "traits": { - "smithy.api#documentation": "

Set the capacity of an Aurora Serverless DB cluster to a specific value.

\n

Aurora Serverless scales seamlessly based on the workload on the DB cluster. In some cases, the capacity might not scale \n fast enough to meet a sudden change in workload, such as a large number of new transactions. Call ModifyCurrentDBClusterCapacity \n to set the capacity explicitly.

\n

After this call sets the DB cluster capacity, Aurora Serverless can automatically scale\n the DB cluster based on the cooldown period for scaling up and the cooldown period\n for scaling down.

\n

For more information about Aurora Serverless, see Using Amazon Aurora Serverless in the \n Amazon Aurora User Guide.

\n \n

If you call ModifyCurrentDBClusterCapacity with the default TimeoutAction, connections that \n prevent Aurora Serverless from finding a scaling point might be dropped. For more information about scaling points, \n see \n Autoscaling for Aurora Serverless in the Amazon Aurora User Guide.

\n
\n \n

This action only applies to Aurora DB clusters.

\n
" + "smithy.api#documentation": "

Set the capacity of an Aurora Serverless DB cluster to a specific value.

\n

Aurora Serverless scales seamlessly based on the workload on the DB cluster. In some cases, the capacity might not scale \n fast enough to meet a sudden change in workload, such as a large number of new transactions. Call ModifyCurrentDBClusterCapacity \n to set the capacity explicitly.

\n

After this call sets the DB cluster capacity, Aurora Serverless can automatically scale\n the DB cluster based on the cooldown period for scaling up and the cooldown period\n for scaling down.

\n

For more information about Aurora Serverless, see Using Amazon Aurora Serverless in the \n Amazon Aurora User Guide.

\n \n

If you call ModifyCurrentDBClusterCapacity with the default TimeoutAction, connections that \n prevent Aurora Serverless from finding a scaling point might be dropped. For more information about scaling points, \n see \n Autoscaling for Aurora Serverless in the Amazon Aurora User Guide.

\n
\n \n

This action only applies to Aurora Serverless DB clusters.

\n
" } }, "com.amazonaws.rds#ModifyCurrentDBClusterCapacityMessage": { @@ -13182,7 +13182,7 @@ "SecondsBeforeTimeout": { "target": "com.amazonaws.rds#IntegerOptional", "traits": { - "smithy.api#documentation": "

The amount of time, in seconds, that Aurora Serverless tries to find a scaling point\n to perform seamless scaling before enforcing the timeout action. The default is\n 300.

\n " + "smithy.api#documentation": "

The amount of time, in seconds, that Aurora Serverless tries to find a scaling point\n to perform seamless scaling before enforcing the timeout action. The default is\n 300.

\n

Specify a value between 10 and 600 seconds.
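A minimal AWS SDK for Go v2 sketch that sets the capacity explicitly and uses SecondsBeforeTimeout together with a TimeoutAction (the cluster identifier and values are placeholders):

```go
package example

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/rds"
)

// scaleCluster sets an Aurora Serverless cluster to 8 capacity units and
// waits up to 120 seconds for a scaling point before forcing the change.
// The cluster identifier is a placeholder.
func scaleCluster(ctx context.Context) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := rds.NewFromConfig(cfg)

	_, err = client.ModifyCurrentDBClusterCapacity(ctx, &rds.ModifyCurrentDBClusterCapacityInput{
		DBClusterIdentifier:  aws.String("my-serverless-cluster"),
		Capacity:             aws.Int32(8),
		SecondsBeforeTimeout: aws.Int32(120),
		// Connections that block a scaling point may be dropped with this action.
		TimeoutAction: aws.String("ForceApplyCapacityChange"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```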

" } }, "TimeoutAction": { @@ -18583,7 +18583,7 @@ "AutoPause": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether to allow or disallow automatic pause for an Aurora DB cluster in serverless DB engine mode. \n A DB cluster can be paused only when it's idle (it has no connections).

\n \n

If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot. \n In this case, the DB cluster is restored when there is a request to connect to it.

\n
" + "smithy.api#documentation": "

A value that indicates whether to allow or disallow automatic pause for an Aurora DB cluster in serverless DB engine mode.\n A DB cluster can be paused only when it's idle (it has no connections).

\n \n

If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot.\n In this case, the DB cluster is restored when there is a request to connect to it.

\n
" } }, "SecondsUntilAutoPause": { @@ -18595,12 +18595,18 @@ "TimeoutAction": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The action to take when the timeout is reached, either ForceApplyCapacityChange or RollbackCapacityChange.

\n

\n ForceApplyCapacityChange sets the capacity to the specified value as soon as possible.

\n

\n RollbackCapacityChange, the default, ignores the capacity change if a scaling point isn't found in the timeout period.

\n \n

If you specify ForceApplyCapacityChange, connections that \n prevent Aurora Serverless from finding a scaling point might be dropped.

\n
\n

For more information, see \n Autoscaling for Aurora Serverless in the Amazon Aurora User Guide.

" + "smithy.api#documentation": "

The action to take when the timeout is reached, either ForceApplyCapacityChange or RollbackCapacityChange.

\n

\n ForceApplyCapacityChange sets the capacity to the specified value as soon as possible.

\n

\n RollbackCapacityChange, the default, ignores the capacity change if a scaling point isn't found in the timeout period.

\n \n

If you specify ForceApplyCapacityChange, connections that\n prevent Aurora Serverless from finding a scaling point might be dropped.

\n
\n

For more information, see \n Autoscaling for Aurora Serverless in the Amazon Aurora User Guide.

" + } + }, + "SecondsBeforeTimeout": { + "target": "com.amazonaws.rds#IntegerOptional", + "traits": { + "smithy.api#documentation": "

The amount of time, in seconds, that Aurora Serverless tries to find a scaling point\n to perform seamless scaling before enforcing the timeout action. The default is 300.

\n

Specify a value between 60 and 600 seconds.

" } } }, "traits": { - "smithy.api#documentation": "

Contains the scaling configuration of an Aurora Serverless DB cluster.

\n

For more information, see Using Amazon Aurora Serverless in the \n Amazon Aurora User Guide.

" + "smithy.api#documentation": "

Contains the scaling configuration of an Aurora Serverless DB cluster.

\n

For more information, see Using Amazon Aurora Serverless in the\n Amazon Aurora User Guide.
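A sketch of building this scaling configuration with the AWS SDK for Go v2, assuming the new SecondsBeforeTimeout member is generated under that name; the value would be passed as ScalingConfiguration on CreateDBClusterInput or ModifyDBClusterInput:

```go
package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/rds/types"
)

// serverlessScaling builds a scaling configuration for an Aurora Serverless
// cluster (engine mode "serverless"). The capacity values are placeholders.
func serverlessScaling() *types.ScalingConfiguration {
	return &types.ScalingConfiguration{
		MinCapacity:           aws.Int32(1),
		MaxCapacity:           aws.Int32(8),
		AutoPause:             aws.Bool(true),
		SecondsUntilAutoPause: aws.Int32(300),
		// Give Aurora up to 5 minutes to find a scaling point, then roll back.
		SecondsBeforeTimeout: aws.Int32(300),
		TimeoutAction:        aws.String("RollbackCapacityChange"),
	}
}
```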

" } }, "com.amazonaws.rds#ScalingConfigurationInfo": { @@ -18621,7 +18627,7 @@ "AutoPause": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

A value that indicates whether automatic pause is allowed for the Aurora DB cluster\n in serverless DB engine mode.

\n

When the value is set to false for an Aurora Serverless DB cluster, the DB cluster automatically resumes.

" + "smithy.api#documentation": "

A value that indicates whether automatic pause is allowed for the Aurora DB cluster\n in serverless DB engine mode.

\n

When the value is set to false for an Aurora Serverless DB cluster, the DB cluster automatically resumes.

" } }, "SecondsUntilAutoPause": { @@ -18633,12 +18639,18 @@ "TimeoutAction": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

The timeout action of a call to ModifyCurrentDBClusterCapacity, either \n ForceApplyCapacityChange or RollbackCapacityChange.

" + "smithy.api#documentation": "

The action that occurs when Aurora times out while attempting to change the capacity of an\n Aurora Serverless cluster. The value is either ForceApplyCapacityChange or\n RollbackCapacityChange.

\n

\n ForceApplyCapacityChange, the default, sets the capacity to the specified value as soon as possible.

\n

\n RollbackCapacityChange ignores the capacity change if a scaling point isn't found in the timeout period.

" + } + }, + "SecondsBeforeTimeout": { + "target": "com.amazonaws.rds#IntegerOptional", + "traits": { + "smithy.api#documentation": "

The number of seconds before scaling times out. What happens when an attempted scaling action times out\n is determined by the TimeoutAction setting.

" } } }, "traits": { - "smithy.api#documentation": "

Shows the scaling configuration for an Aurora DB cluster in serverless DB engine mode.

\n

For more information, see Using Amazon Aurora Serverless in the \n Amazon Aurora User Guide.

" + "smithy.api#documentation": "

Shows the scaling configuration for an Aurora DB cluster in serverless DB engine mode.

\n

For more information, see Using Amazon Aurora Serverless in the\n Amazon Aurora User Guide.

" } }, "com.amazonaws.rds#SharedSnapshotQuotaExceededFault": { diff --git a/codegen/sdk-codegen/aws-models/robomaker.2018-06-29.json b/codegen/sdk-codegen/aws-models/robomaker.2018-06-29.json index 39ea261a018..06d0fc64705 100644 --- a/codegen/sdk-codegen/aws-models/robomaker.2018-06-29.json +++ b/codegen/sdk-codegen/aws-models/robomaker.2018-06-29.json @@ -812,8 +812,7 @@ "sources": { "target": "com.amazonaws.robomaker#SourceConfigs", "traits": { - "smithy.api#documentation": "

The sources of the robot application.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The sources of the robot application.

" } }, "robotSoftwareSuite": { @@ -828,6 +827,12 @@ "traits": { "smithy.api#documentation": "

A map that contains tag keys and tag values that are attached to the robot\n application.

" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "

The object that contains the URI of the Docker image that you use for your robot application.

" + } } } }, @@ -881,6 +886,12 @@ "traits": { "smithy.api#documentation": "

The list of all tags added to the robot application.

" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "

An object that contains the Docker image URI used to create your robot application.

" + } } } }, @@ -933,6 +944,18 @@ "traits": { "smithy.api#documentation": "

The current revision id for the robot application. If you provide a value and it matches\n the latest revision ID, a new version will be created.

" } + }, + "s3Etags": { + "target": "com.amazonaws.robomaker#S3Etags", + "traits": { + "smithy.api#documentation": "

The Amazon S3 identifier for the zip file bundle that you use for your robot application.

" + } + }, + "imageDigest": { + "target": "com.amazonaws.robomaker#ImageDigest", + "traits": { + "smithy.api#documentation": "

A SHA256 identifier for the Docker image that you use for your robot application.

" + } } } }, @@ -980,6 +1003,12 @@ "traits": { "smithy.api#documentation": "

The revision id of the robot application.

" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "

The object that contains the Docker image URI used to create your robot application.

" + } } } }, @@ -1106,8 +1135,7 @@ "sources": { "target": "com.amazonaws.robomaker#SourceConfigs", "traits": { - "smithy.api#documentation": "

The sources of the simulation application.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The sources of the simulation application.

" } }, "simulationSoftwareSuite": { @@ -1135,6 +1163,12 @@ "traits": { "smithy.api#documentation": "

A map that contains tag keys and tag values that are attached to the simulation\n application.

" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "

The object that contains the Docker image URI used to create your simulation application.

" + } } } }, @@ -1200,6 +1234,12 @@ "traits": { "smithy.api#documentation": "

The list of all tags added to the simulation application.

" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "

The object that contains the Docker image URI that you used to create your simulation application.

" + } } } }, @@ -1252,6 +1292,18 @@ "traits": { "smithy.api#documentation": "

The current revision id for the simulation application. If you provide a value and it\n matches the latest revision ID, a new version will be created.

" } + }, + "s3Etags": { + "target": "com.amazonaws.robomaker#S3Etags", + "traits": { + "smithy.api#documentation": "

The Amazon S3 eTag identifier for the zip file bundle that you use to create the simulation application.

" + } + }, + "imageDigest": { + "target": "com.amazonaws.robomaker#ImageDigest", + "traits": { + "smithy.api#documentation": "

The SHA256 digest used to identify the Docker image URI used to create the simulation application.

" + } } } }, @@ -1311,6 +1363,12 @@ "traits": { "smithy.api#documentation": "

The revision ID of the simulation application.

" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "

The object that contains the Docker image URI used to create the simulation application.

" + } } } }, @@ -2977,6 +3035,18 @@ "traits": { "smithy.api#documentation": "

The list of all tags added to the specified robot application.

" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "

The object that contains the Docker image URI used to create the robot application.

" + } + }, + "imageDigest": { + "target": "com.amazonaws.robomaker#ImageDigest", + "traits": { + "smithy.api#documentation": "

A SHA256 identifier for the Docker image that you use for your robot application.

" + } } } }, @@ -3168,6 +3238,18 @@ "traits": { "smithy.api#documentation": "

The list of all tags added to the specified simulation application.

" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "

The object that contains the Docker image URI used to create the simulation application.

" + } + }, + "imageDigest": { + "target": "com.amazonaws.robomaker#ImageDigest", + "traits": { + "smithy.api#documentation": "

A SHA256 identifier for the Docker image that you use for your simulation application.

" + } } } }, @@ -3855,6 +3937,20 @@ } } }, + "com.amazonaws.robomaker#Environment": { + "type": "structure", + "members": { + "uri": { + "target": "com.amazonaws.robomaker#RepositoryUrl", + "traits": { + "smithy.api#documentation": "

The Docker image URI for either your robot or simulation applications.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The object that contains the Docker image URI for either your robot or simulation applications.

" + } + }, "com.amazonaws.robomaker#EnvironmentVariableKey": { "type": "string", "traits": { @@ -4221,6 +4317,16 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.robomaker#ImageDigest": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 72 + }, + "smithy.api#pattern": "^[Ss][Hh][Aa]256:[0-9a-fA-F]{64}$" + } + }, "com.amazonaws.robomaker#Integer": { "type": "integer" }, @@ -5549,6 +5655,16 @@ "smithy.api#pattern": "^1.x$" } }, + "com.amazonaws.robomaker#RepositoryUrl": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^.+$" + } + }, "com.amazonaws.robomaker#ResourceAlreadyExistsException": { "type": "structure", "members": { @@ -6026,6 +6142,12 @@ "com.amazonaws.robomaker#S3Etag": { "type": "string" }, + "com.amazonaws.robomaker#S3Etags": { + "type": "list", + "member": { + "target": "com.amazonaws.robomaker#S3Etag" + } + }, "com.amazonaws.robomaker#S3Key": { "type": "string", "traits": { @@ -7560,8 +7682,7 @@ "sources": { "target": "com.amazonaws.robomaker#SourceConfigs", "traits": { - "smithy.api#documentation": "

The sources of the robot application.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The sources of the robot application.

" } }, "robotSoftwareSuite": { @@ -7576,6 +7697,12 @@ "traits": { "smithy.api#documentation": "

The revision id for the robot application.

" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "

The object that contains the Docker image URI for your robot application.

" + } } } }, @@ -7623,6 +7750,12 @@ "traits": { "smithy.api#documentation": "

The revision id of the robot application.

" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "

The object that contains the Docker image URI for your robot application.

" + } } } }, @@ -7673,8 +7806,7 @@ "sources": { "target": "com.amazonaws.robomaker#SourceConfigs", "traits": { - "smithy.api#documentation": "

The sources of the simulation application.

", - "smithy.api#required": {} + "smithy.api#documentation": "

The sources of the simulation application.

" } }, "simulationSoftwareSuite": { @@ -7702,6 +7834,12 @@ "traits": { "smithy.api#documentation": "

The revision id for the robot application.

" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "

The object that contains the Docker image URI for your simulation application.

" + } } } }, @@ -7761,6 +7899,12 @@ "traits": { "smithy.api#documentation": "

The revision id of the simulation application.

" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "

The object that contains the Docker image URI used for your simulation application.

" + } } } }, diff --git a/codegen/sdk-codegen/aws-models/s3.2006-03-01.json b/codegen/sdk-codegen/aws-models/s3.2006-03-01.json index 393262c6372..32ab20ff528 100644 --- a/codegen/sdk-codegen/aws-models/s3.2006-03-01.json +++ b/codegen/sdk-codegen/aws-models/s3.2006-03-01.json @@ -177,6 +177,9 @@ "smithy.api#documentation": "

A container for information about access control for replicas.

" } }, + "com.amazonaws.s3#AccessPointArn": { + "type": "string" + }, "com.amazonaws.s3#AccountId": { "type": "string" }, @@ -1193,7 +1196,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

If you specified server-side encryption either with an Amazon S3-managed encryption key or an\n Amazon Web Services KMS customer master key (CMK) in your initiate multipart upload request, the response\n includes this header. It confirms the encryption algorithm that Amazon S3 used to encrypt the\n object.

", + "smithy.api#documentation": "

If you specified server-side encryption either with an Amazon S3-managed encryption key or an\n Amazon Web Services KMS key in your initiate multipart upload request, the response\n includes this header. It confirms the encryption algorithm that Amazon S3 used to encrypt the\n object.

", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -1207,7 +1210,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.

", + "smithy.api#documentation": "

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed key that was used for the object.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -1468,7 +1471,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.

", + "smithy.api#documentation": "

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed key that was used for the object.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -1926,6 +1929,14 @@ "smithy.api#required": {} } }, + "CreateBucketConfiguration": { + "target": "com.amazonaws.s3#CreateBucketConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration information for the bucket.

", + "smithy.api#httpPayload": {}, + "smithy.api#xmlName": "CreateBucketConfiguration" + } + }, "GrantFullControl": { "target": "com.amazonaws.s3#GrantFullControl", "traits": { @@ -1967,14 +1978,6 @@ "smithy.api#documentation": "

Specifies whether you want S3 Object Lock to be enabled for the new bucket.

", "smithy.api#httpHeader": "x-amz-bucket-object-lock-enabled" } - }, - "CreateBucketConfiguration": { - "target": "com.amazonaws.s3#CreateBucketConfiguration", - "traits": { - "smithy.api#documentation": "

The configuration information for the bucket.

", - "smithy.api#httpPayload": {}, - "smithy.api#xmlName": "CreateBucketConfiguration" - } } } }, @@ -1987,7 +1990,7 @@ "target": "com.amazonaws.s3#CreateMultipartUploadOutput" }, "traits": { - "smithy.api#documentation": "

This action initiates a multipart upload and returns an upload ID. This upload ID is\n used to associate all of the parts in the specific multipart upload. You specify this\n upload ID in each of your subsequent upload part requests (see UploadPart). You also include this\n upload ID in the final request to either complete or abort the multipart upload\n request.

\n\n

For more information about multipart uploads, see Multipart Upload Overview.

\n\n

If you have configured a lifecycle rule to abort incomplete multipart uploads, the\n upload must complete within the number of days specified in the bucket lifecycle\n configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort\n action and Amazon S3 aborts the multipart upload. For more information, see Aborting\n Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

\n\n

For information about the permissions required to use the multipart upload API, see\n Multipart Upload and\n Permissions.

\n\n

For request signing, multipart upload is just a series of regular requests. You initiate\n a multipart upload, send one or more requests to upload parts, and then complete the\n multipart upload process. You sign each request individually. There is nothing special\n about signing multipart upload requests. For more information about signing, see Authenticating\n Requests (Amazon Web Services Signature Version 4).

\n\n \n

After you initiate a multipart upload and upload one or more parts, to stop being\n charged for storing the uploaded parts, you must either complete or abort the multipart\n upload. Amazon S3 frees up the space used to store the parts and stop charging you for\n storing them only after you either complete or abort a multipart upload.

\n
\n\n

You can optionally request server-side encryption. For server-side encryption, Amazon S3\n encrypts your data as it writes it to disks in its data centers and decrypts it when you\n access it. You can provide your own encryption key, or use Amazon Web Services Key Management Service (Amazon Web Services\n KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide\n your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to\n initiate the upload by using CreateMultipartUpload.

\n

To perform a multipart upload with encryption using an Amazon Web Services KMS CMK, the requester must\n have permission to the kms:Decrypt and kms:GenerateDataKey*\n actions on the key. These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API\n and permissions in the Amazon S3 User Guide.

\n\n

If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account\n as the Amazon Web Services KMS CMK, then you must have these permissions on the key policy. If your IAM\n user or role belongs to a different account than the key, then you must have the\n permissions on both the key policy and your IAM user or role.

\n\n\n

For more information, see Protecting\n Data Using Server-Side Encryption.

\n\n
\n
Access Permissions
\n
\n

When copying an object, you can optionally specify the accounts or groups that\n should be granted specific permissions on the new object. There are two ways to\n grant the permissions using the request headers:

\n \n

You can use either a canned ACL or specify access permissions explicitly. You\n cannot do both.

\n
\n
Server-Side- Encryption-Specific Request Headers
\n
\n

You can optionally tell Amazon S3 to encrypt data at rest using server-side\n encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts\n your data as it writes it to disks in its data centers and decrypts it when you\n access it. The option you use depends on whether you want to use Amazon Web Services managed\n encryption keys or provide your own encryption key.

\n \n
\n
Access-Control-List (ACL)-Specific Request Headers
\n
\n

You also can use the following access control–related headers with this\n operation. By default, all objects are private. Only the owner has full access\n control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added\n to the access control list (ACL) on the object. For more information, see Using ACLs. With this\n operation, you can grant access permissions using one of the following two\n methods:

\n \n\n
\n
\n\n

The following operations are related to CreateMultipartUpload:

\n ", + "smithy.api#documentation": "

This action initiates a multipart upload and returns an upload ID. This upload ID is\n used to associate all of the parts in the specific multipart upload. You specify this\n upload ID in each of your subsequent upload part requests (see UploadPart). You also include this\n upload ID in the final request to either complete or abort the multipart upload\n request.

\n\n

For more information about multipart uploads, see Multipart Upload Overview.

\n\n

If you have configured a lifecycle rule to abort incomplete multipart uploads, the\n upload must complete within the number of days specified in the bucket lifecycle\n configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort\n action and Amazon S3 aborts the multipart upload. For more information, see Aborting\n Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.

\n\n

For information about the permissions required to use the multipart upload API, see\n Multipart Upload and\n Permissions.

\n\n

For request signing, multipart upload is just a series of regular requests. You initiate\n a multipart upload, send one or more requests to upload parts, and then complete the\n multipart upload process. You sign each request individually. There is nothing special\n about signing multipart upload requests. For more information about signing, see Authenticating\n Requests (Amazon Web Services Signature Version 4).

\n\n \n

After you initiate a multipart upload and upload one or more parts, to stop being\n charged for storing the uploaded parts, you must either complete or abort the multipart\n upload. Amazon S3 frees up the space used to store the parts and stops charging you for\n storing them only after you either complete or abort a multipart upload.

\n
\n\n

You can optionally request server-side encryption. For server-side encryption, Amazon S3\n encrypts your data as it writes it to disks in its data centers and decrypts it when you\n access it. You can provide your own encryption key, or use Amazon Web Services KMS keys or Amazon S3-managed encryption keys. If you choose to provide\n your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to\n initiate the upload by using CreateMultipartUpload.

\n

To perform a multipart upload with encryption using an Amazon Web Services KMS key, the requester must\n have permission to the kms:Decrypt and kms:GenerateDataKey*\n actions on the key. These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API\n and permissions in the Amazon S3 User Guide.

\n\n

If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account\n as the KMS key, then you must have these permissions on the key policy. If your IAM\n user or role belongs to a different account than the key, then you must have the\n permissions on both the key policy and your IAM user or role.

\n\n\n

For more information, see Protecting\n Data Using Server-Side Encryption.

\n\n
\n
Access Permissions
\n
\n

When creating a new object with this operation, you can optionally specify the accounts or groups that\n should be granted specific permissions on the new object. There are two ways to\n grant the permissions using the request headers:

\n \n

You can use either a canned ACL or specify access permissions explicitly. You\n cannot do both.
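A short Go sketch of the two mutually exclusive styles; the bucket, key, and grantee values are placeholders.

```go
// Sketch of the two ACL styles on CreateMultipartUpload; use one or the other, not both.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func cannedACLUpload(ctx context.Context, client *s3.Client) error {
	// Option 1: a canned ACL.
	_, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		Key:    aws.String("report.csv"),
		ACL:    types.ObjectCannedACLBucketOwnerFullControl,
	})
	return err
}

func explicitGrantUpload(ctx context.Context, client *s3.Client) error {
	// Option 2: explicit grant headers; do not combine with the canned ACL above.
	_, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket:    aws.String("amzn-s3-demo-bucket"),
		Key:       aws.String("report.csv"),
		GrantRead: aws.String(`id="CANONICAL_USER_ID"`), // placeholder grantee in the id= form
	})
	return err
}
```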

\n
\n
Server-Side-Encryption-Specific Request Headers
\n
\n

You can optionally tell Amazon S3 to encrypt data at rest using server-side\n encryption. With server-side encryption, Amazon S3 encrypts\n your data as it writes it to disks in its data centers and decrypts it when you\n access it. The option you use depends on whether you want to use Amazon Web Services managed\n encryption keys or provide your own encryption key.

\n \n
\n
Access-Control-List (ACL)-Specific Request Headers
\n
\n

You can also use the following access control–related headers with this\n operation. By default, all objects are private. Only the owner has full access\n control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups that Amazon S3 defines. These permissions are then added\n to the access control list (ACL) on the object. For more information, see Using ACLs. With this\n operation, you can grant access permissions using one of the following two\n methods:

\n \n\n
\n
\n\n

The following operations are related to CreateMultipartUpload:

\n ", "smithy.api#http": { "method": "POST", "uri": "/{Bucket}/{Key+}?uploads&x-id=CreateMultipartUpload", @@ -2055,7 +2058,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.

", + "smithy.api#documentation": "

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed key that was used for the object.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -2232,7 +2235,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

Specifies the ID of the symmetric customer managed Amazon Web Services KMS CMK to use for object\n encryption. All GET and PUT requests for an object protected by Amazon Web Services KMS will fail if not\n made via SSL or using SigV4. For information about configuring using any of the officially\n supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication\n in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

Specifies the ID of the symmetric customer managed key to use for object\n encryption. All GET and PUT requests for an object protected by Amazon Web Services KMS will fail if not\n made via SSL or using SigV4. For information about configuring using any of the officially\n supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication\n in the Amazon S3 User Guide.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -2487,7 +2490,7 @@ "target": "com.amazonaws.s3#DeleteBucketIntelligentTieringConfigurationRequest" }, "traits": { - "smithy.api#documentation": "

Deletes the S3 Intelligent-Tiering configuration from the specified bucket.

\n

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

\n

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

\n

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

\n

Operations related to\n DeleteBucketIntelligentTieringConfiguration include:

\n ", + "smithy.api#documentation": "

Deletes the S3 Intelligent-Tiering configuration from the specified bucket.

\n

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.

\n

The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

\n

For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

\n

Operations related to\n DeleteBucketIntelligentTieringConfiguration include:

\n ", "smithy.api#http": { "method": "DELETE", "uri": "/{Bucket}?intelligent-tiering", @@ -3122,6 +3125,15 @@ "smithy.api#required": {} } }, + "Delete": { + "target": "com.amazonaws.s3#Delete", + "traits": { + "smithy.api#documentation": "

Container for the request.

", + "smithy.api#httpPayload": {}, + "smithy.api#required": {}, + "smithy.api#xmlName": "Delete" + } + }, "MFA": { "target": "com.amazonaws.s3#MFA", "traits": { @@ -3148,15 +3160,6 @@ "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } - }, - "Delete": { - "target": "com.amazonaws.s3#Delete", - "traits": { - "smithy.api#documentation": "

Container for the request.

", - "smithy.api#httpPayload": {}, - "smithy.api#required": {}, - "smithy.api#xmlName": "Delete" - } } } }, @@ -3326,7 +3329,7 @@ "KMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If the encryption type is aws:kms, this optional value specifies the ID of\n the symmetric customer managed Amazon Web Services KMS CMK to use for encryption of job results. Amazon S3 only\n supports symmetric CMKs. For more information, see Using symmetric and\n asymmetric keys in the Amazon Web Services Key Management Service Developer Guide.

" + "smithy.api#documentation": "

If the encryption type is aws:kms, this optional value specifies the ID of\n the symmetric customer managed key to use for encryption of job results. Amazon S3 only\n supports symmetric keys. For more information, see Using symmetric and\n asymmetric keys in the Amazon Web Services Key Management Service Developer Guide.

" } }, "KMSContext": { @@ -3913,7 +3916,7 @@ "target": "com.amazonaws.s3#GetBucketIntelligentTieringConfigurationOutput" }, "traits": { - "smithy.api#documentation": "

Gets the S3 Intelligent-Tiering configuration from the specified bucket.

\n

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

\n

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

\n

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

\n

Operations related to\n GetBucketIntelligentTieringConfiguration include:

\n ", + "smithy.api#documentation": "

Gets the S3 Intelligent-Tiering configuration from the specified bucket.

\n

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.

\n

The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

\n

For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

\n

Operations related to\n GetBucketIntelligentTieringConfiguration include:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?intelligent-tiering&x-id=GetBucketIntelligentTieringConfiguration", @@ -4700,7 +4703,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves objects from Amazon S3. To use GET, you must have READ\n access to the object. If you grant READ access to the anonymous user, you can\n return the object without using an authorization header.

\n\n

An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer\n file system. You can, however, create a logical hierarchy by using object key names that\n imply a folder structure. For example, instead of naming an object sample.jpg,\n you can name it photos/2006/February/sample.jpg.

\n\n

To get an object from such a logical hierarchy, specify the full key name for the object\n in the GET operation. For a virtual hosted-style request example, if you have\n the object photos/2006/February/sample.jpg, specify the resource as\n /photos/2006/February/sample.jpg. For a path-style request example, if you\n have the object photos/2006/February/sample.jpg in the bucket named\n examplebucket, specify the resource as\n /examplebucket/photos/2006/February/sample.jpg. For more information about\n request types, see HTTP Host Header Bucket Specification.

\n\n

To distribute large files to many people, you can save bandwidth costs by using\n BitTorrent. For more information, see Amazon S3\n Torrent. For more information about returning the ACL of an object, see GetObjectAcl.

\n\n

If the object you are retrieving is stored in the S3 Glacier or\n S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a\n copy using RestoreObject. Otherwise, this action returns an\n InvalidObjectStateError error. For information about restoring archived\n objects, see Restoring Archived\n Objects.

\n\n

Encryption request headers, like x-amz-server-side-encryption, should not\n be sent for GET requests if your object uses server-side encryption with CMKs stored in Amazon Web Services\n KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your\n object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

\n

If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object,\n you must use the following headers:

\n \n

For more information about SSE-C, see Server-Side Encryption (Using\n Customer-Provided Encryption Keys).

\n\n

Assuming you have the relevant permission to read object tags, the response also returns the\n x-amz-tagging-count header that provides the count of number of tags\n associated with the object. You can use GetObjectTagging to retrieve\n the tag set associated with an object.

\n\n

\n Permissions\n

\n

You need the relevant read object (or version) permission for this operation. For more\n information, see Specifying Permissions\n in a Policy. If the object you request does not exist, the error Amazon S3 returns\n depends on whether you also have the s3:ListBucket permission.

\n \n\n\n

\n Versioning\n

\n

By default, the GET action returns the current version of an object. To return a\n different version, use the versionId subresource.

\n\n \n \n \n\n\n

For more information about versioning, see PutBucketVersioning.

\n\n

\n Overriding Response Header Values\n

\n

There are times when you want to override certain response header values in a GET\n response. For example, you might override the Content-Disposition response header value in\n your GET request.

\n\n

You can override values for a set of response headers using the following query\n parameters. These response header values are sent only on a successful request, that is,\n when status code 200 OK is returned. The set of headers you can override using these\n parameters is a subset of the headers that Amazon S3 accepts when you create an object. The\n response headers that you can override for the GET response are Content-Type,\n Content-Language, Expires, Cache-Control,\n Content-Disposition, and Content-Encoding. To override these\n header values in the GET response, you use the following request parameters.

\n\n \n

You must sign the request, either using an Authorization header or a presigned URL,\n when using these parameters. They cannot be used with an unsigned (anonymous)\n request.

\n
\n \n\n

\n Additional Considerations about Request Headers\n

\n\n

If both of the If-Match and If-Unmodified-Since headers are\n present in the request as follows: If-Match condition evaluates to\n true, and; If-Unmodified-Since condition evaluates to\n false; then, S3 returns 200 OK and the data requested.

\n\n

If both of the If-None-Match and If-Modified-Since headers are\n present in the request as follows: If-None-Match condition evaluates to\n false, and; If-Modified-Since condition evaluates to\n true; then, S3 returns 304 Not Modified response code.

\n\n

For more information about conditional requests, see RFC 7232.

\n\n

The following operations are related to GetObject:

\n ", + "smithy.api#documentation": "

Retrieves objects from Amazon S3. To use GET, you must have READ\n access to the object. If you grant READ access to the anonymous user, you can\n return the object without using an authorization header.

\n\n

An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer\n file system. You can, however, create a logical hierarchy by using object key names that\n imply a folder structure. For example, instead of naming an object sample.jpg,\n you can name it photos/2006/February/sample.jpg.

\n\n

To get an object from such a logical hierarchy, specify the full key name for the object\n in the GET operation. For a virtual hosted-style request example, if you have\n the object photos/2006/February/sample.jpg, specify the resource as\n /photos/2006/February/sample.jpg. For a path-style request example, if you\n have the object photos/2006/February/sample.jpg in the bucket named\n examplebucket, specify the resource as\n /examplebucket/photos/2006/February/sample.jpg. For more information about\n request types, see HTTP Host Header Bucket Specification.

\n\n

To distribute large files to many people, you can save bandwidth costs by using\n BitTorrent. For more information, see Amazon S3\n Torrent. For more information about returning the ACL of an object, see GetObjectAcl.

\n\n

If the object you are retrieving is stored in the S3 Glacier or\n S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a\n copy using RestoreObject. Otherwise, this action returns an\n InvalidObjectStateError error. For information about restoring archived\n objects, see Restoring Archived\n Objects.

\n\n

Encryption request headers, like x-amz-server-side-encryption, should not\n be sent for GET requests if your object uses server-side encryption with KMS keys (SSE-KMS) \n or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your\n object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

\n

If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object,\n you must use the following headers:

\n \n

For more information about SSE-C, see Server-Side Encryption (Using\n Customer-Provided Encryption Keys).
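A minimal Go sketch of retrieving an SSE-C protected object, assuming the same algorithm, key, and MD5 values used when the object was stored; bucket, key, and key material are placeholders.

```go
// Sketch: GetObject with customer-provided encryption key (SSE-C) headers.
package example

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func getSSECObject(ctx context.Context, client *s3.Client, keyB64, keyMD5B64 string) ([]byte, error) {
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket:               aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:                  aws.String("encrypted-object.bin"),
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String(keyB64),    // same key supplied at upload time
		SSECustomerKeyMD5:    aws.String(keyMD5B64), // base64-encoded MD5 of that key
	})
	if err != nil {
		return nil, err
	}
	defer out.Body.Close()
	return io.ReadAll(out.Body)
}
```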

\n\n

Assuming you have the relevant permission to read object tags, the response also returns the\n x-amz-tagging-count header that provides the number of tags\n associated with the object. You can use GetObjectTagging to retrieve\n the tag set associated with an object.

\n\n

\n Permissions\n

\n

You need the relevant read object (or version) permission for this operation. For more\n information, see Specifying Permissions\n in a Policy. If the object you request does not exist, the error Amazon S3 returns\n depends on whether you also have the s3:ListBucket permission.

\n \n\n\n

\n Versioning\n

\n

By default, the GET action returns the current version of an object. To return a\n different version, use the versionId subresource.

\n\n \n \n \n\n\n

For more information about versioning, see PutBucketVersioning.

\n\n

\n Overriding Response Header Values\n

\n

There are times when you want to override certain response header values in a GET\n response. For example, you might override the Content-Disposition response header value in\n your GET request.

\n\n

You can override values for a set of response headers using the following query\n parameters. These response header values are sent only on a successful request, that is,\n when status code 200 OK is returned. The set of headers you can override using these\n parameters is a subset of the headers that Amazon S3 accepts when you create an object. The\n response headers that you can override for the GET response are Content-Type,\n Content-Language, Expires, Cache-Control,\n Content-Disposition, and Content-Encoding. To override these\n header values in the GET response, you use the following request parameters.

\n\n \n

You must sign the request, either using an Authorization header or a presigned URL,\n when using these parameters. They cannot be used with an unsigned (anonymous)\n request.
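A Go sketch of both signed forms, assuming the aws-sdk-go-v2 presigner; the bucket and key are placeholders, and the override parameters map to the query parameters described above.

```go
// Sketch: overriding response headers on GetObject, once via a normal signed
// request and once baked into a presigned URL.
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func getWithOverrides(ctx context.Context, client *s3.Client) error {
	input := &s3.GetObjectInput{
		Bucket:                     aws.String("amzn-s3-demo-bucket"),
		Key:                        aws.String("photos/2006/February/sample.jpg"),
		ResponseContentType:        aws.String("image/jpeg"),
		ResponseContentDisposition: aws.String(`attachment; filename="sample.jpg"`),
	}

	// Signed GetObject request that asks S3 to rewrite two response headers.
	out, err := client.GetObject(ctx, input)
	if err != nil {
		return err
	}
	out.Body.Close()

	// The same overrides carried by a presigned URL, which includes its own signature.
	presigner := s3.NewPresignClient(client)
	req, err := presigner.PresignGetObject(ctx, input)
	if err != nil {
		return err
	}
	fmt.Println(req.URL)
	return nil
}
```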

\n
\n \n\n

\n Additional Considerations about Request Headers\n

\n\n

If both the If-Match and If-Unmodified-Since headers are\n present in the request, the If-Match condition evaluates to\n true, and the If-Unmodified-Since condition evaluates to\n false, then S3 returns 200 OK and the requested data.

\n\n

If both the If-None-Match and If-Modified-Since headers are\n present in the request, the If-None-Match condition evaluates to\n false, and the If-Modified-Since condition evaluates to\n true, then S3 returns a 304 Not Modified response code.

\n\n

For more information about conditional requests, see RFC 7232.
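A small Go sketch of a conditional GetObject; the ETag and timestamp are placeholders, and the assumption here is that a not-satisfied condition (for example a 304) surfaces from the SDK as an operation error rather than a normal response.

```go
// Sketch: conditional GetObject using If-None-Match and If-Modified-Since.
package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func getIfChanged(ctx context.Context, client *s3.Client, cachedETag string, cachedAt time.Time) (*s3.GetObjectOutput, error) {
	return client.GetObject(ctx, &s3.GetObjectInput{
		Bucket:          aws.String("amzn-s3-demo-bucket"),
		Key:             aws.String("photos/2006/February/sample.jpg"),
		IfNoneMatch:     aws.String(cachedETag), // return the object only if its ETag changed
		IfModifiedSince: aws.Time(cachedAt),     // ...or if it was modified after this time
	})
}
```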

\n\n

The following operations are related to GetObject:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}/{Key+}?x-id=GetObject", @@ -5077,7 +5080,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.

", + "smithy.api#documentation": "

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed key that was used for the object.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -5151,7 +5154,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

The bucket name containing the object.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

The bucket name containing the object.

\n

When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

\n

When using an Object Lambda access point, the hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.

\n

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5749,7 +5752,7 @@ } ], "traits": { - "smithy.api#documentation": "

The HEAD action retrieves metadata from an object without returning the object\n itself. This action is useful if you're only interested in an object's metadata. To use\n HEAD, you must have READ access to the object.

\n\n

A HEAD request has the same options as a GET action on an\n object. The response is identical to the GET response except that there is no\n response body. Because of this, if the HEAD request generates an error, it\n returns a generic 404 Not Found or 403 Forbidden code. It is not \n possible to retrieve the exact exception beyond these error codes.

\n\n

If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers:

\n \n

For more information about SSE-C, see Server-Side Encryption (Using\n Customer-Provided Encryption Keys).

\n \n \n \n\n\n

Request headers are limited to 8 KB in size. For more information, see Common Request\n Headers.

\n

Consider the following when using request headers:

\n \n\n

For more information about conditional requests, see RFC 7232.

\n\n

\n Permissions\n

\n

You need the relevant read object (or version) permission for this operation. For more\n information, see Specifying Permissions\n in a Policy. If the object you request does not exist, the error Amazon S3 returns\n depends on whether you also have the s3:ListBucket permission.

\n \n\n

The following action is related to HeadObject:

\n ", + "smithy.api#documentation": "

The HEAD action retrieves metadata from an object without returning the object\n itself. This action is useful if you're only interested in an object's metadata. To use\n HEAD, you must have READ access to the object.

\n\n

A HEAD request has the same options as a GET action on an\n object. The response is identical to the GET response except that there is no\n response body. Because of this, if the HEAD request generates an error, it\n returns a generic 404 Not Found or 403 Forbidden code. It is not \n possible to retrieve the exact exception beyond these error codes.
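A minimal Go sketch of reading object metadata without the body; the bucket and key are placeholders.

```go
// Sketch: HeadObject returns headers and metadata only, with no response body.
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func printObjectMetadata(ctx context.Context, client *s3.Client) error {
	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:    aws.String("photos/2006/February/sample.jpg"),
	})
	if err != nil {
		// Per the note above, a missing object or missing s3:ListBucket permission
		// comes back only as a generic 404 or 403 error.
		return err
	}
	fmt.Println("content type:", aws.ToString(out.ContentType))
	fmt.Println("last modified:", aws.ToTime(out.LastModified))
	fmt.Println("user metadata:", out.Metadata)
	return nil
}
```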

\n\n

If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the\n metadata from the object, you must use the following headers:

\n \n

For more information about SSE-C, see Server-Side Encryption (Using\n Customer-Provided Encryption Keys).

\n \n \n \n\n\n

Request headers are limited to 8 KB in size. For more information, see Common Request\n Headers.

\n

Consider the following when using request headers:

\n \n\n

For more information about conditional requests, see RFC 7232.

\n\n

\n Permissions\n

\n

You need the relevant read object (or version) permission for this operation. For more\n information, see Specifying Permissions\n in a Policy. If the object you request does not exist, the error Amazon S3 returns\n depends on whether you also have the s3:ListBucket permission.

\n \n\n

The following action is related to HeadObject:

\n ", "smithy.api#http": { "method": "HEAD", "uri": "/{Bucket}/{Key+}", @@ -5912,7 +5915,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

If the object is stored using server-side encryption either with an Amazon Web Services KMS customer\n master key (CMK) or an Amazon S3-managed encryption key, the response includes this header with\n the value of the server-side encryption algorithm used when storing this object in Amazon\n S3 (for example, AES256, aws:kms).

", + "smithy.api#documentation": "

If the object is stored using server-side encryption either with an Amazon Web Services KMS key or \n an Amazon S3-managed encryption key, the response includes this header with\n the value of the server-side encryption algorithm used when storing this object in Amazon\n S3 (for example, AES256, aws:kms).

", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -5940,7 +5943,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.

", + "smithy.api#documentation": "

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed key that was used for the object.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -6958,7 +6961,7 @@ "target": "com.amazonaws.s3#ListBucketIntelligentTieringConfigurationsOutput" }, "traits": { - "smithy.api#documentation": "

Lists the S3 Intelligent-Tiering configuration from the specified bucket.

\n

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

\n

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

\n

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

\n

Operations related to\n ListBucketIntelligentTieringConfigurations include:

\n ", + "smithy.api#documentation": "

Lists the S3 Intelligent-Tiering configuration from the specified bucket.

\n

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.

\n

The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

\n

For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

\n

Operations related to\n ListBucketIntelligentTieringConfigurations include:

\n ", "smithy.api#http": { "method": "GET", "uri": "/{Bucket}?intelligent-tiering&x-id=ListBucketIntelligentTieringConfigurations", @@ -8222,6 +8225,12 @@ "smithy.api#xmlFlattened": {}, "smithy.api#xmlName": "Tag" } + }, + "AccessPointArn": { + "target": "com.amazonaws.s3#AccessPointArn", + "traits": { + "smithy.api#documentation": "

The access point ARN used when evaluating an AND predicate.

" + } } }, "traits": { @@ -8241,12 +8250,12 @@ "Filter": { "target": "com.amazonaws.s3#MetricsFilter", "traits": { - "smithy.api#documentation": "

Specifies a metrics configuration filter. The metrics configuration will only include\n objects that meet the filter's criteria. A filter must be a prefix, a tag, or a conjunction\n (MetricsAndOperator).

" + "smithy.api#documentation": "

Specifies a metrics configuration filter. The metrics configuration will only include\n objects that meet the filter's criteria. A filter must be a prefix, an object tag, an access point ARN, or a conjunction\n (MetricsAndOperator).

" } } }, "traits": { - "smithy.api#documentation": "

Specifies a metrics configuration for the CloudWatch request metrics (specified by the\n metrics configuration ID) from an Amazon S3 bucket. If you're updating an existing metrics\n configuration, note that this is a full replacement of the existing metrics configuration.\n If you don't include the elements you want to keep, they are erased. For more information,\n see PUT Bucket\n metrics in the Amazon S3 API Reference.

" + "smithy.api#documentation": "

Specifies a metrics configuration for the CloudWatch request metrics (specified by the\n metrics configuration ID) from an Amazon S3 bucket. If you're updating an existing metrics\n configuration, note that this is a full replacement of the existing metrics configuration.\n If you don't include the elements you want to keep, they are erased. For more information,\n see PutBucketMetricsConfiguration.

" } }, "com.amazonaws.s3#MetricsConfigurationList": { @@ -8270,6 +8279,12 @@ "smithy.api#documentation": "

The tag used when evaluating a metrics filter.

" } }, + "AccessPointArn": { + "target": "com.amazonaws.s3#AccessPointArn", + "traits": { + "smithy.api#documentation": "

The access point ARN used when evaluating a metrics filter.

" + } + }, "And": { "target": "com.amazonaws.s3#MetricsAndOperator", "traits": { @@ -8278,7 +8293,7 @@ } }, "traits": { - "smithy.api#documentation": "

Specifies a metrics configuration filter. The metrics configuration only includes\n objects that meet the filter's criteria. A filter must be a prefix, a tag, or a conjunction\n (MetricsAndOperator).

" + "smithy.api#documentation": "

Specifies a metrics configuration filter. The metrics configuration only includes\n objects that meet the filter's criteria. A filter must be a prefix, an object tag, an access point ARN, or a conjunction\n (MetricsAndOperator). For more information, see PutBucketMetricsConfiguration.
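A hedged Go sketch of the new access point ARN filter; this assumes the generated types model MetricsFilter as a tagged union (MetricsFilterMemberAccessPointArn), and the bucket, configuration ID, and ARN are placeholders.

```go
// Sketch: request-metrics configuration whose filter is an access point ARN.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func putAccessPointMetrics(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketMetricsConfiguration(ctx, &s3.PutBucketMetricsConfigurationInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		Id:     aws.String("AccessPointTraffic"),
		MetricsConfiguration: &types.MetricsConfiguration{
			Id: aws.String("AccessPointTraffic"),
			// Only requests made through this access point are counted.
			Filter: &types.MetricsFilterMemberAccessPointArn{
				Value: "arn:aws:s3:us-west-2:111122223333:accesspoint/example-ap",
			},
		},
	})
	return err
}
```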

" } }, "com.amazonaws.s3#MetricsId": { @@ -9242,13 +9257,6 @@ "smithy.api#required": {} } }, - "ExpectedBucketOwner": { - "target": "com.amazonaws.s3#AccountId", - "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", - "smithy.api#httpHeader": "x-amz-expected-bucket-owner" - } - }, "AccelerateConfiguration": { "target": "com.amazonaws.s3#AccelerateConfiguration", "traits": { @@ -9257,6 +9265,13 @@ "smithy.api#required": {}, "smithy.api#xmlName": "AccelerateConfiguration" } + }, + "ExpectedBucketOwner": { + "target": "com.amazonaws.s3#AccountId", + "traits": { + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "smithy.api#httpHeader": "x-amz-expected-bucket-owner" + } } } }, @@ -9285,6 +9300,14 @@ "smithy.api#httpHeader": "x-amz-acl" } }, + "AccessControlPolicy": { + "target": "com.amazonaws.s3#AccessControlPolicy", + "traits": { + "smithy.api#documentation": "

Contains the elements that set the ACL permissions for an object per grantee.

", + "smithy.api#httpPayload": {}, + "smithy.api#xmlName": "AccessControlPolicy" + } + }, "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { @@ -9341,14 +9364,6 @@ "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } - }, - "AccessControlPolicy": { - "target": "com.amazonaws.s3#AccessControlPolicy", - "traits": { - "smithy.api#documentation": "

Contains the elements that set the ACL permissions for an object per grantee.

", - "smithy.api#httpPayload": {}, - "smithy.api#xmlName": "AccessControlPolicy" - } } } }, @@ -9385,13 +9400,6 @@ "smithy.api#required": {} } }, - "ExpectedBucketOwner": { - "target": "com.amazonaws.s3#AccountId", - "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", - "smithy.api#httpHeader": "x-amz-expected-bucket-owner" - } - }, "AnalyticsConfiguration": { "target": "com.amazonaws.s3#AnalyticsConfiguration", "traits": { @@ -9400,6 +9408,13 @@ "smithy.api#required": {}, "smithy.api#xmlName": "AnalyticsConfiguration" } + }, + "ExpectedBucketOwner": { + "target": "com.amazonaws.s3#AccountId", + "traits": { + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "smithy.api#httpHeader": "x-amz-expected-bucket-owner" + } } } }, @@ -9429,6 +9444,15 @@ "smithy.api#required": {} } }, + "CORSConfiguration": { + "target": "com.amazonaws.s3#CORSConfiguration", + "traits": { + "smithy.api#documentation": "

Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more\n information, see Enabling Cross-Origin Resource\n Sharing in the Amazon S3 User Guide.

", + "smithy.api#httpPayload": {}, + "smithy.api#required": {}, + "smithy.api#xmlName": "CORSConfiguration" + } + }, "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { @@ -9442,15 +9466,6 @@ "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } - }, - "CORSConfiguration": { - "target": "com.amazonaws.s3#CORSConfiguration", - "traits": { - "smithy.api#documentation": "

Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more\n information, see Enabling Cross-Origin Resource\n Sharing in the Amazon S3 User Guide.

", - "smithy.api#httpPayload": {}, - "smithy.api#required": {}, - "smithy.api#xmlName": "CORSConfiguration" - } } } }, @@ -9460,7 +9475,7 @@ "target": "com.amazonaws.s3#PutBucketEncryptionRequest" }, "traits": { - "smithy.api#documentation": "

This action uses the encryption subresource to configure default\n encryption and Amazon S3 Bucket Key for an existing bucket.

\n

Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys\n (SSE-S3) or Amazon Web Services KMS customer master keys (SSE-KMS). If you specify default encryption\n using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default\n encryption, see Amazon S3 default bucket encryption\n in the Amazon S3 User Guide. For more information about S3 Bucket Keys,\n see Amazon S3 Bucket Keys in the Amazon S3 User Guide.

\n \n

This action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature\n Version 4).

\n
\n

To use this operation, you must have permissions to perform the\n s3:PutEncryptionConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources in the Amazon S3 User Guide.

\n \n

\n Related Resources\n

\n ", + "smithy.api#documentation": "

This action uses the encryption subresource to configure default\n encryption and Amazon S3 Bucket Key for an existing bucket.

\n

Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys\n (SSE-S3) or customer managed keys (SSE-KMS). If you specify default encryption\n using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default\n encryption, see Amazon S3 default bucket encryption\n in the Amazon S3 User Guide. For more information about S3 Bucket Keys,\n see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
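A Go sketch of setting default SSE-KMS encryption on a bucket; the bucket name and KMS key ARN are placeholders, and the optional Bucket Key setting is omitted for brevity.

```go
// Sketch: configure default bucket encryption with a customer managed KMS key.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func setDefaultKMSEncryption(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{
				{
					ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
						SSEAlgorithm:   types.ServerSideEncryptionAwsKms,
						KMSMasterKeyID: aws.String("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID"),
					},
				},
			},
		},
	})
	return err
}
```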

\n \n

This action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature\n Version 4).

\n
\n

To use this operation, you must have permissions to perform the\n s3:PutEncryptionConfiguration action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources in the Amazon S3 User Guide.

\n \n

\n Related Resources\n

\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?encryption", @@ -9475,7 +9490,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "

Specifies default encryption for a bucket using server-side encryption with Amazon S3-managed\n keys (SSE-S3) or customer master keys stored in Amazon Web Services KMS (SSE-KMS). For information about\n the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption\n in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

Specifies default encryption for a bucket using server-side encryption with Amazon S3-managed\n keys (SSE-S3) or customer managed keys (SSE-KMS). For information about\n the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption\n in the Amazon S3 User Guide.

", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -9487,13 +9502,6 @@ "smithy.api#httpHeader": "Content-MD5" } }, - "ExpectedBucketOwner": { - "target": "com.amazonaws.s3#AccountId", - "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", - "smithy.api#httpHeader": "x-amz-expected-bucket-owner" - } - }, "ServerSideEncryptionConfiguration": { "target": "com.amazonaws.s3#ServerSideEncryptionConfiguration", "traits": { @@ -9501,6 +9509,13 @@ "smithy.api#required": {}, "smithy.api#xmlName": "ServerSideEncryptionConfiguration" } + }, + "ExpectedBucketOwner": { + "target": "com.amazonaws.s3#AccountId", + "traits": { + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "smithy.api#httpHeader": "x-amz-expected-bucket-owner" + } } } }, @@ -9510,7 +9525,7 @@ "target": "com.amazonaws.s3#PutBucketIntelligentTieringConfigurationRequest" }, "traits": { - "smithy.api#documentation": "

Puts a S3 Intelligent-Tiering configuration to the specified bucket.\n You can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.

\n

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.

\n

The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.

\n

If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

\n

Operations related to\n PutBucketIntelligentTieringConfiguration include:

\n \n \n

You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically\n move objects stored in the S3 Intelligent-Tiering storage class to the\n Archive Access or Deep Archive Access tier.

\n
\n \n

\n Special Errors\n

\n ", + "smithy.api#documentation": "

Puts a S3 Intelligent-Tiering configuration to the specified bucket.\n You can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.

\n

The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.

\n

The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

\n

For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

\n

Operations related to\n PutBucketIntelligentTieringConfiguration include:

\n \n \n

You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically\n move objects stored in the S3 Intelligent-Tiering storage class to the\n Archive Access or Deep Archive Access tier.

\n
\n \n

\n Special Errors\n

\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?intelligent-tiering", @@ -9581,13 +9596,6 @@ "smithy.api#required": {} } }, - "ExpectedBucketOwner": { - "target": "com.amazonaws.s3#AccountId", - "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", - "smithy.api#httpHeader": "x-amz-expected-bucket-owner" - } - }, "InventoryConfiguration": { "target": "com.amazonaws.s3#InventoryConfiguration", "traits": { @@ -9596,6 +9604,13 @@ "smithy.api#required": {}, "smithy.api#xmlName": "InventoryConfiguration" } + }, + "ExpectedBucketOwner": { + "target": "com.amazonaws.s3#AccountId", + "traits": { + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "smithy.api#httpHeader": "x-amz-expected-bucket-owner" + } } } }, @@ -9625,13 +9640,6 @@ "smithy.api#required": {} } }, - "ExpectedBucketOwner": { - "target": "com.amazonaws.s3#AccountId", - "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", - "smithy.api#httpHeader": "x-amz-expected-bucket-owner" - } - }, "LifecycleConfiguration": { "target": "com.amazonaws.s3#BucketLifecycleConfiguration", "traits": { @@ -9639,6 +9647,13 @@ "smithy.api#httpPayload": {}, "smithy.api#xmlName": "LifecycleConfiguration" } + }, + "ExpectedBucketOwner": { + "target": "com.amazonaws.s3#AccountId", + "traits": { + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "smithy.api#httpHeader": "x-amz-expected-bucket-owner" + } } } }, @@ -9668,6 +9683,15 @@ "smithy.api#required": {} } }, + "BucketLoggingStatus": { + "target": "com.amazonaws.s3#BucketLoggingStatus", + "traits": { + "smithy.api#documentation": "

Container for logging status information.

", + "smithy.api#httpPayload": {}, + "smithy.api#required": {}, + "smithy.api#xmlName": "BucketLoggingStatus" + } + }, "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { @@ -9681,15 +9705,6 @@ "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } - }, - "BucketLoggingStatus": { - "target": "com.amazonaws.s3#BucketLoggingStatus", - "traits": { - "smithy.api#documentation": "

Container for logging status information.

", - "smithy.api#httpPayload": {}, - "smithy.api#required": {}, - "smithy.api#xmlName": "BucketLoggingStatus" - } } } }, @@ -9699,7 +9714,7 @@ "target": "com.amazonaws.s3#PutBucketMetricsConfigurationRequest" }, "traits": { - "smithy.api#documentation": "

Sets a metrics configuration (specified by the metrics configuration ID) for the bucket.\n You can have up to 1,000 metrics configurations per bucket. If you're updating an existing\n metrics configuration, note that this is a full replacement of the existing metrics\n configuration. If you don't include the elements you want to keep, they are erased.

\n\n

To use this operation, you must have permissions to perform the\n s3:PutMetricsConfiguration action. The bucket owner has this permission by\n default. The bucket owner can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources.

\n\n

For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon\n CloudWatch.

\n\n

The following operations are related to\n PutBucketMetricsConfiguration:

\n \n\n

\n GetBucketLifecycle has the following special error:

\n ", + "smithy.api#documentation": "

Sets a metrics configuration (specified by the metrics configuration ID) for the bucket.\n You can have up to 1,000 metrics configurations per bucket. If you're updating an existing\n metrics configuration, note that this is a full replacement of the existing metrics\n configuration. If you don't include the elements you want to keep, they are erased.

\n\n

To use this operation, you must have permissions to perform the\n s3:PutMetricsConfiguration action. The bucket owner has this permission by\n default. The bucket owner can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources.

\n\n

For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon\n CloudWatch.

\n\n

The following operations are related to\n PutBucketMetricsConfiguration:

\n \n\n

\n PutBucketMetricsConfiguration has the following special error:

\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?metrics", @@ -9726,13 +9741,6 @@ "smithy.api#required": {} } }, - "ExpectedBucketOwner": { - "target": "com.amazonaws.s3#AccountId", - "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", - "smithy.api#httpHeader": "x-amz-expected-bucket-owner" - } - }, "MetricsConfiguration": { "target": "com.amazonaws.s3#MetricsConfiguration", "traits": { @@ -9741,6 +9749,13 @@ "smithy.api#required": {}, "smithy.api#xmlName": "MetricsConfiguration" } + }, + "ExpectedBucketOwner": { + "target": "com.amazonaws.s3#AccountId", + "traits": { + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "smithy.api#httpHeader": "x-amz-expected-bucket-owner" + } } } }, @@ -9769,13 +9784,6 @@ "smithy.api#required": {} } }, - "ExpectedBucketOwner": { - "target": "com.amazonaws.s3#AccountId", - "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", - "smithy.api#httpHeader": "x-amz-expected-bucket-owner" - } - }, "NotificationConfiguration": { "target": "com.amazonaws.s3#NotificationConfiguration", "traits": { @@ -9783,6 +9791,13 @@ "smithy.api#required": {}, "smithy.api#xmlName": "NotificationConfiguration" } + }, + "ExpectedBucketOwner": { + "target": "com.amazonaws.s3#AccountId", + "traits": { + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "smithy.api#httpHeader": "x-amz-expected-bucket-owner" + } } } }, @@ -9900,7 +9915,7 @@ "target": "com.amazonaws.s3#PutBucketReplicationRequest" }, "traits": { - "smithy.api#documentation": "

Creates a replication configuration or replaces an existing one. For more information,\n see Replication in the Amazon S3 User Guide.

\n \n

Specify the replication configuration in the request body. In the replication\n configuration, you provide the name of the destination bucket or buckets where you want\n Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your\n behalf, and other relevant information.

\n\n\n

A replication configuration must include at least one rule, and can contain a maximum of\n 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in\n the source bucket. To choose additional subsets of objects to replicate, add a rule for\n each subset.

\n\n

To specify a subset of the objects in the source bucket to apply a replication rule to,\n add the Filter element as a child of the Rule element. You can filter objects based on an\n object key prefix, one or more object tags, or both. When you add the Filter element in the\n configuration, you must also add the following elements:\n DeleteMarkerReplication, Status, and\n Priority.

\n \n

If you are using an earlier version of the replication configuration, Amazon S3 handles\n replication of delete markers differently. For more information, see Backward Compatibility.

\n
\n

For information about enabling versioning on a bucket, see Using Versioning.

\n\n

\n Handling Replication of Encrypted Objects\n

\n

By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side\n encryption with CMKs stored in Amazon Web Services KMS. To replicate Amazon Web Services KMS-encrypted objects, add the\n following: SourceSelectionCriteria, SseKmsEncryptedObjects,\n Status, EncryptionConfiguration, and\n ReplicaKmsKeyID. For information about replication configuration, see\n Replicating Objects\n Created with SSE Using CMKs stored in Amazon Web Services KMS.

\n\n

For information on PutBucketReplication errors, see List of\n replication-related error codes\n

\n\n

\n Permissions\n

\n

To create a PutBucketReplication request, you must have s3:PutReplicationConfiguration \n permissions for the bucket. \n

\n

By default, a resource owner, in this case the Amazon Web Services account that created the bucket, can\n perform this operation. The resource owner can also grant others permissions to perform the\n operation. For more information about permissions, see Specifying Permissions in a Policy\n and Managing Access Permissions to Your\n Amazon S3 Resources.

\n \n

To perform this operation, the user or role performing the action must have the\n iam:PassRole permission.

\n
\n\n

The following operations are related to PutBucketReplication:

\n ", + "smithy.api#documentation": "

Creates a replication configuration or replaces an existing one. For more information,\n see Replication in the Amazon S3 User Guide.

\n \n

Specify the replication configuration in the request body. In the replication\n configuration, you provide the name of the destination bucket or buckets where you want\n Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your\n behalf, and other relevant information.

\n\n\n

A replication configuration must include at least one rule, and can contain a maximum of\n 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in\n the source bucket. To choose additional subsets of objects to replicate, add a rule for\n each subset.

\n\n

To specify a subset of the objects in the source bucket to apply a replication rule to,\n add the Filter element as a child of the Rule element. You can filter objects based on an\n object key prefix, one or more object tags, or both. When you add the Filter element in the\n configuration, you must also add the following elements:\n DeleteMarkerReplication, Status, and\n Priority.

\n \n

If you are using an earlier version of the replication configuration, Amazon S3 handles\n replication of delete markers differently. For more information, see Backward Compatibility.

\n
\n

For information about enabling versioning on a bucket, see Using Versioning.

\n\n

\n Handling Replication of Encrypted Objects\n

\n

By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side\n encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted objects, add the\n following: SourceSelectionCriteria, SseKmsEncryptedObjects,\n Status, EncryptionConfiguration, and\n ReplicaKmsKeyID. For information about replication configuration, see\n Replicating Objects\n Created with SSE Using KMS keys.

\n\n

For information on PutBucketReplication errors, see List of\n replication-related error codes\n

\n\n

\n Permissions\n

\n

To create a PutBucketReplication request, you must have the s3:PutReplicationConfiguration \n permission for the bucket. \n

\n

By default, a resource owner, in this case the Amazon Web Services account that created the bucket, can\n perform this operation. The resource owner can also grant others permissions to perform the\n operation. For more information about permissions, see Specifying Permissions in a Policy\n and Managing Access Permissions to Your\n Amazon S3 Resources.

\n \n

To perform this operation, the user or role performing the action must have the\n iam:PassRole permission.

\n
\n\n

The following operations are related to PutBucketReplication:

\n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}?replication", @@ -9927,6 +9942,14 @@ "smithy.api#httpHeader": "Content-MD5" } }, + "ReplicationConfiguration": { + "target": "com.amazonaws.s3#ReplicationConfiguration", + "traits": { + "smithy.api#httpPayload": {}, + "smithy.api#required": {}, + "smithy.api#xmlName": "ReplicationConfiguration" + } + }, "Token": { "target": "com.amazonaws.s3#ObjectLockToken", "traits": { @@ -9940,14 +9963,6 @@ "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } - }, - "ReplicationConfiguration": { - "target": "com.amazonaws.s3#ReplicationConfiguration", - "traits": { - "smithy.api#httpPayload": {}, - "smithy.api#required": {}, - "smithy.api#xmlName": "ReplicationConfiguration" - } } } }, @@ -9984,13 +9999,6 @@ "smithy.api#httpHeader": "Content-MD5" } }, - "ExpectedBucketOwner": { - "target": "com.amazonaws.s3#AccountId", - "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", - "smithy.api#httpHeader": "x-amz-expected-bucket-owner" - } - }, "RequestPaymentConfiguration": { "target": "com.amazonaws.s3#RequestPaymentConfiguration", "traits": { @@ -9999,6 +10007,13 @@ "smithy.api#required": {}, "smithy.api#xmlName": "RequestPaymentConfiguration" } + }, + "ExpectedBucketOwner": { + "target": "com.amazonaws.s3#AccountId", + "traits": { + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "smithy.api#httpHeader": "x-amz-expected-bucket-owner" + } } } }, @@ -10035,13 +10050,6 @@ "smithy.api#httpHeader": "Content-MD5" } }, - "ExpectedBucketOwner": { - "target": "com.amazonaws.s3#AccountId", - "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", - "smithy.api#httpHeader": "x-amz-expected-bucket-owner" - } - }, "Tagging": { "target": "com.amazonaws.s3#Tagging", "traits": { @@ -10050,6 +10058,13 @@ "smithy.api#required": {}, "smithy.api#xmlName": "Tagging" } + }, + "ExpectedBucketOwner": { + "target": "com.amazonaws.s3#AccountId", + "traits": { + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "smithy.api#httpHeader": "x-amz-expected-bucket-owner" + } } } }, @@ -10093,13 +10108,6 @@ "smithy.api#httpHeader": "x-amz-mfa" } }, - "ExpectedBucketOwner": { - "target": "com.amazonaws.s3#AccountId", - "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", - "smithy.api#httpHeader": "x-amz-expected-bucket-owner" - } - }, "VersioningConfiguration": { "target": "com.amazonaws.s3#VersioningConfiguration", "traits": { @@ -10108,6 +10116,13 @@ "smithy.api#required": {}, "smithy.api#xmlName": "VersioningConfiguration" } + }, + "ExpectedBucketOwner": { + "target": "com.amazonaws.s3#AccountId", + "traits": { + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "smithy.api#httpHeader": "x-amz-expected-bucket-owner" + } } } }, @@ -10144,13 +10159,6 @@ "smithy.api#httpHeader": "Content-MD5" } }, - "ExpectedBucketOwner": { - "target": "com.amazonaws.s3#AccountId", - "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", - "smithy.api#httpHeader": "x-amz-expected-bucket-owner" - } - }, "WebsiteConfiguration": { "target": "com.amazonaws.s3#WebsiteConfiguration", "traits": { @@ -10159,6 +10167,13 @@ "smithy.api#required": {}, "smithy.api#xmlName": "WebsiteConfiguration" } + }, + "ExpectedBucketOwner": { + "target": "com.amazonaws.s3#AccountId", + "traits": { + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "smithy.api#httpHeader": "x-amz-expected-bucket-owner" + } } } }, @@ -10223,6 +10238,14 @@ "smithy.api#httpHeader": "x-amz-acl" } }, + "AccessControlPolicy": { + "target": "com.amazonaws.s3#AccessControlPolicy", + "traits": { + "smithy.api#documentation": "

Contains the elements that set the ACL permissions for an object per grantee.

", + "smithy.api#httpPayload": {}, + "smithy.api#xmlName": "AccessControlPolicy" + } + }, "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { @@ -10300,14 +10323,6 @@ "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } - }, - "AccessControlPolicy": { - "target": "com.amazonaws.s3#AccessControlPolicy", - "traits": { - "smithy.api#documentation": "

Contains the elements that set the ACL permissions for an object per grantee.

", - "smithy.api#httpPayload": {}, - "smithy.api#xmlName": "AccessControlPolicy" - } } } }, @@ -10359,6 +10374,14 @@ "smithy.api#required": {} } }, + "LegalHold": { + "target": "com.amazonaws.s3#ObjectLockLegalHold", + "traits": { + "smithy.api#documentation": "

Container element for the Legal Hold configuration you want to apply to the specified\n object.

", + "smithy.api#httpPayload": {}, + "smithy.api#xmlName": "LegalHold" + } + }, "RequestPayer": { "target": "com.amazonaws.s3#RequestPayer", "traits": { @@ -10385,14 +10408,6 @@ "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } - }, - "LegalHold": { - "target": "com.amazonaws.s3#ObjectLockLegalHold", - "traits": { - "smithy.api#documentation": "

Container element for the Legal Hold configuration you want to apply to the specified\n object.

", - "smithy.api#httpPayload": {}, - "smithy.api#xmlName": "LegalHold" - } } } }, @@ -10436,6 +10451,14 @@ "smithy.api#required": {} } }, + "ObjectLockConfiguration": { + "target": "com.amazonaws.s3#ObjectLockConfiguration", + "traits": { + "smithy.api#documentation": "

The Object Lock configuration that you want to apply to the specified bucket.

", + "smithy.api#httpPayload": {}, + "smithy.api#xmlName": "ObjectLockConfiguration" + } + }, "RequestPayer": { "target": "com.amazonaws.s3#RequestPayer", "traits": { @@ -10462,14 +10485,6 @@ "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } - }, - "ObjectLockConfiguration": { - "target": "com.amazonaws.s3#ObjectLockConfiguration", - "traits": { - "smithy.api#documentation": "

The Object Lock configuration that you want to apply to the specified bucket.

", - "smithy.api#httpPayload": {}, - "smithy.api#xmlName": "ObjectLockConfiguration" - } } } }, @@ -10493,7 +10508,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "

If you specified server-side encryption either with an Amazon Web Services KMS customer master key (CMK)\n or Amazon S3-managed encryption key in your PUT request, the response includes this header. It\n confirms the encryption algorithm that Amazon S3 used to encrypt the object.

", + "smithy.api#documentation": "

If you specified server-side encryption either with an Amazon Web Services KMS key\n or Amazon S3-managed encryption key in your PUT request, the response includes this header. It\n confirms the encryption algorithm that Amazon S3 used to encrypt the object.

", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -10521,7 +10536,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If x-amz-server-side-encryption is present and has the value of\n aws:kms, this header specifies the ID of the Amazon Web Services Key Management Service\n (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) that was used for the\n object.

", + "smithy.api#documentation": "

If x-amz-server-side-encryption is present and has the value of\n aws:kms, this header specifies the ID of the Amazon Web Services Key Management Service\n (Amazon Web Services KMS) symmetric customer managed key that was used for the\n object.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -10716,7 +10731,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If x-amz-server-side-encryption is present and has the value of\n aws:kms, this header specifies the ID of the Amazon Web Services Key Management Service\n (Amazon Web Services KMS) symmetrical customer managed customer master key (CMK) that was used for the\n object. If you specify x-amz-server-side-encryption:aws:kms, but do not\n provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services\n managed CMK in Amazon Web Services to protect the data. If the KMS key does not exist in the same account\n issuing the command, you must use the full ARN and not just the ID.\n

", + "smithy.api#documentation": "

If x-amz-server-side-encryption is present and has the value of\n aws:kms, this header specifies the ID of the Amazon Web Services Key Management Service\n (Amazon Web Services KMS) symmetric customer managed key that was used for the\n object. If you specify x-amz-server-side-encryption:aws:kms, but do not\n provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services\n managed key to protect the data. If the KMS key does not exist in the same account\n issuing the command, you must use the full ARN and not just the ID.\n

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -10825,6 +10840,14 @@ "smithy.api#required": {} } }, + "Retention": { + "target": "com.amazonaws.s3#ObjectLockRetention", + "traits": { + "smithy.api#documentation": "

The container element for the Object Retention configuration.

", + "smithy.api#httpPayload": {}, + "smithy.api#xmlName": "Retention" + } + }, "RequestPayer": { "target": "com.amazonaws.s3#RequestPayer", "traits": { @@ -10858,14 +10881,6 @@ "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } - }, - "Retention": { - "target": "com.amazonaws.s3#ObjectLockRetention", - "traits": { - "smithy.api#documentation": "

The container element for the Object Retention configuration.

", - "smithy.api#httpPayload": {}, - "smithy.api#xmlName": "Retention" - } } } }, @@ -10932,6 +10947,15 @@ "smithy.api#httpHeader": "Content-MD5" } }, + "Tagging": { + "target": "com.amazonaws.s3#Tagging", + "traits": { + "smithy.api#documentation": "

Container for the TagSet and Tag elements.

", + "smithy.api#httpPayload": {}, + "smithy.api#required": {}, + "smithy.api#xmlName": "Tagging" + } + }, "ExpectedBucketOwner": { "target": "com.amazonaws.s3#AccountId", "traits": { @@ -10944,15 +10968,6 @@ "traits": { "smithy.api#httpHeader": "x-amz-request-payer" } - }, - "Tagging": { - "target": "com.amazonaws.s3#Tagging", - "traits": { - "smithy.api#documentation": "

Container for the TagSet and Tag elements

", - "smithy.api#httpPayload": {}, - "smithy.api#required": {}, - "smithy.api#xmlName": "Tagging" - } } } }, @@ -10989,13 +11004,6 @@ "smithy.api#httpHeader": "Content-MD5" } }, - "ExpectedBucketOwner": { - "target": "com.amazonaws.s3#AccountId", - "traits": { - "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", - "smithy.api#httpHeader": "x-amz-expected-bucket-owner" - } - }, "PublicAccessBlockConfiguration": { "target": "com.amazonaws.s3#PublicAccessBlockConfiguration", "traits": { @@ -11004,6 +11012,13 @@ "smithy.api#required": {}, "smithy.api#xmlName": "PublicAccessBlockConfiguration" } + }, + "ExpectedBucketOwner": { + "target": "com.amazonaws.s3#AccountId", + "traits": { + "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", + "smithy.api#httpHeader": "x-amz-expected-bucket-owner" + } } } }, @@ -11249,7 +11264,7 @@ "SourceSelectionCriteria": { "target": "com.amazonaws.s3#SourceSelectionCriteria", "traits": { - "smithy.api#documentation": "

A container that describes additional filters for identifying the source objects that\n you want to replicate. You can choose to enable or disable the replication of these\n objects. Currently, Amazon S3 supports only the filter that you can specify for objects created\n with server-side encryption using a customer master key (CMK) stored in Amazon Web Services Key Management\n Service (SSE-KMS).

" + "smithy.api#documentation": "

A container that describes additional filters for identifying the source objects that\n you want to replicate. You can choose to enable or disable the replication of these\n objects. Currently, Amazon S3 supports only the filter that you can specify for objects created\n with server-side encryption using a customer managed key stored in Amazon Web Services Key Management\n Service (SSE-KMS).

" } }, "ExistingObjectReplication": { @@ -11565,6 +11580,13 @@ "smithy.api#httpQuery": "versionId" } }, + "RestoreRequest": { + "target": "com.amazonaws.s3#RestoreRequest", + "traits": { + "smithy.api#httpPayload": {}, + "smithy.api#xmlName": "RestoreRequest" + } + }, "RequestPayer": { "target": "com.amazonaws.s3#RequestPayer", "traits": { @@ -11577,13 +11599,6 @@ "smithy.api#documentation": "

The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

", "smithy.api#httpHeader": "x-amz-expected-bucket-owner" } - }, - "RestoreRequest": { - "target": "com.amazonaws.s3#RestoreRequest", - "traits": { - "smithy.api#httpPayload": {}, - "smithy.api#xmlName": "RestoreRequest" - } } } }, @@ -11772,7 +11787,7 @@ "KeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

Specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed\n customer master key (CMK) to use for encrypting inventory reports.

", + "smithy.api#documentation": "

Specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed key\n to use for encrypting inventory reports.

", "smithy.api#required": {} } } @@ -11831,7 +11846,7 @@ "target": "com.amazonaws.s3#SelectObjectContentOutput" }, "traits": { - "smithy.api#documentation": "

This action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.

\n

This action is not supported by Amazon S3 on Outposts.

\n

For more information about Amazon S3 Select,\n see Selecting Content from\n Objects in the Amazon S3 User Guide.

\n

For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select\n and S3 Glacier Select in the Amazon S3 User Guide.

\n

\n

\n Permissions\n

\n

You must have s3:GetObject permission for this operation. Amazon S3 Select does\n not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy\n in the Amazon S3 User Guide.

\n

\n

\n Object Data Formats\n

\n

You can use Amazon S3 Select to query objects that have the following format\n properties:

\n \n\n

\n Working with the Response Body\n

\n

Given the response size is unknown, Amazon S3 Select streams the response as a series of\n messages and includes a Transfer-Encoding header with chunked as\n its value in the response. For more information, see Appendix: SelectObjectContent\n Response.

\n\n

\n

\n GetObject Support\n

\n

The SelectObjectContent action does not support the following\n GetObject functionality. For more information, see GetObject.

\n \n

\n

\n Special Errors\n

\n\n

For a list of special errors for this operation, see List of\n SELECT Object Content Error Codes\n

\n

\n Related Resources\n

\n ", + "smithy.api#documentation": "

This action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.

\n

This action is not supported by Amazon S3 on Outposts.

\n

For more information about Amazon S3 Select,\n see Selecting Content from\n Objects in the Amazon S3 User Guide.

\n

For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select\n and S3 Glacier Select in the Amazon S3 User Guide.

\n

\n

\n Permissions\n

\n

You must have s3:GetObject permission for this operation. Amazon S3 Select does\n not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy\n in the Amazon S3 User Guide.

\n

\n

\n Object Data Formats\n

\n

You can use Amazon S3 Select to query objects that have the following format\n properties:

\n \n\n

\n Working with the Response Body\n

\n

Given the response size is unknown, Amazon S3 Select streams the response as a series of\n messages and includes a Transfer-Encoding header with chunked as\n its value in the response. For more information, see Appendix: SelectObjectContent\n Response.

\n\n

\n

\n GetObject Support\n

\n

The SelectObjectContent action does not support the following\n GetObject functionality. For more information, see GetObject.

\n \n

\n

\n Special Errors\n

\n\n

For a list of special errors for this operation, see List of\n SELECT Object Content Error Codes\n

\n

\n Related Resources\n

\n ", "smithy.api#http": { "method": "POST", "uri": "/{Bucket}/{Key+}?select&select-type=2&x-id=SelectObjectContent", @@ -12101,7 +12116,7 @@ "type": "boolean" }, "com.amazonaws.s3#Size": { - "type": "integer" + "type": "long" }, "com.amazonaws.s3#SourceSelectionCriteria": { "type": "structure", @@ -12120,7 +12135,7 @@ } }, "traits": { - "smithy.api#documentation": "

A container that describes additional filters for identifying the source objects that\n you want to replicate. You can choose to enable or disable the replication of these\n objects. Currently, Amazon S3 supports only the filter that you can specify for objects created\n with server-side encryption using a customer master key (CMK) stored in Amazon Web Services Key Management\n Service (SSE-KMS).

" + "smithy.api#documentation": "

A container that describes additional filters for identifying the source objects that\n you want to replicate. You can choose to enable or disable the replication of these\n objects. Currently, Amazon S3 supports only the filter that you can specify for objects created\n with server-side encryption using a customer managed key stored in Amazon Web Services Key Management\n Service (SSE-KMS).

" } }, "com.amazonaws.s3#SseKmsEncryptedObjects": { @@ -12650,7 +12665,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.

", + "smithy.api#documentation": "

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed key that was used for the object.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -12845,7 +12860,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) was used for the object.

", + "smithy.api#documentation": "

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed key that was used for the object.

", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -13037,7 +13052,7 @@ "smithy.api#auth": [ "aws.auth#sigv4" ], - "smithy.api#documentation": "

Passes transformed\n objects to a GetObject operation when using Object Lambda Access Points. For information about\n Object Lambda Access Points, see Transforming objects with\n Object Lambda Access Points in the Amazon S3 User Guide.

\n

This operation supports metadata that can be returned by GetObject, in addition to\n RequestRoute, RequestToken, StatusCode,\n ErrorCode, and ErrorMessage. The GetObject\n response metadata is supported so that the WriteGetObjectResponse caller,\n typically an Lambda function, can provide the same metadata when it internally invokes\n GetObject. When WriteGetObjectResponse is called by a\n customer-owned Lambda function, the metadata returned to the end user\n GetObject call might differ from what Amazon S3 would normally return.

\n

You can include any number of metadata headers. When including a metadata header, it should be\n prefaced with x-amz-meta. For example, x-amz-meta-my-custom-header: MyCustomValue.\n The primary use case for this is to forward GetObject metadata.

\n

Amazon Web Services provides some prebuilt Lambda functions that you can use with S3 Object Lambda to detect and redact\n personally identifiable information (PII) and decompress S3 objects. These Lambda functions\n are available in the Amazon Web Services Serverless Application Repository, and can be selected through the Amazon Web Services Management Console when you create your\n Object Lambda Access Point.

\n

Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically detects personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.

\n

Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically redacts personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.

\n

Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is equipped to decompress objects stored in S3 in one of six compressed file formats including bzip2, gzip, snappy, zlib, zstandard and ZIP.

\n

For information on how to view and use these functions, see Using Amazon Web Services built Lambda functions in the Amazon S3 User Guide.

", + "smithy.api#documentation": "

Passes transformed\n objects to a GetObject operation when using Object Lambda access points. For information about\n Object Lambda access points, see Transforming objects with\n Object Lambda access points in the Amazon S3 User Guide.

\n

This operation supports metadata that can be returned by GetObject, in addition to\n RequestRoute, RequestToken, StatusCode,\n ErrorCode, and ErrorMessage. The GetObject\n response metadata is supported so that the WriteGetObjectResponse caller,\n typically a Lambda function, can provide the same metadata when it internally invokes\n GetObject. When WriteGetObjectResponse is called by a\n customer-owned Lambda function, the metadata returned to the end user\n GetObject call might differ from what Amazon S3 would normally return.

\n

You can include any number of metadata headers. When including a metadata header, it should be\n prefaced with x-amz-meta. For example, x-amz-meta-my-custom-header: MyCustomValue.\n The primary use case for this is to forward GetObject metadata.

\n

Amazon Web Services provides some prebuilt Lambda functions that you can use with S3 Object Lambda to detect and redact\n personally identifiable information (PII) and decompress S3 objects. These Lambda functions\n are available in the Amazon Web Services Serverless Application Repository, and can be selected through the Amazon Web Services Management Console when you create your\n Object Lambda access point.

\n

Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically detects personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.

\n

Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically redacts personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.

\n

Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is equipped to decompress objects stored in S3 in one of six compressed file formats including bzip2, gzip, snappy, zlib, zstandard and ZIP.

\n

For information on how to view and use these functions, see Using Amazon Web Services built Lambda functions in the Amazon S3 User Guide.

", "smithy.api#endpoint": { "hostPrefix": "{RequestRoute}." }, @@ -13266,7 +13281,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) that was used for stored in Amazon S3 object.

", + "smithy.api#documentation": "

If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed key that was used for the object stored in Amazon S3.

", "smithy.api#httpHeader": "x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id" } }, diff --git a/codegen/sdk-codegen/aws-models/sagemaker.2017-07-24.json b/codegen/sdk-codegen/aws-models/sagemaker.2017-07-24.json index e1ba3b5eadc..cf019199a40 100644 --- a/codegen/sdk-codegen/aws-models/sagemaker.2017-07-24.json +++ b/codegen/sdk-codegen/aws-models/sagemaker.2017-07-24.json @@ -3912,7 +3912,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an action. An action is a lineage tracking entity that\n represents an action or activity. For example, a model deployment or an HPO job.\n Generally, an action involves at least one input or output artifact. For more information, see\n Amazon SageMaker\n ML Lineage Tracking.

\n \n

\n CreateAction can only be invoked from within an SageMaker managed\n environment. This includes SageMaker training jobs, processing jobs, transform jobs, and SageMaker\n notebooks. A call to CreateAction from outside one of these\n environments results in an error.

\n
" + "smithy.api#documentation": "

Creates an action. An action is a lineage tracking entity that\n represents an action or activity. For example, a model deployment or an HPO job.\n Generally, an action involves at least one input or output artifact. For more information, see\n Amazon SageMaker\n ML Lineage Tracking.

" } }, "com.amazonaws.sagemaker#CreateActionRequest": { @@ -4194,7 +4194,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an artifact. An artifact is a lineage tracking entity that\n represents a URI addressable object or data. Some examples are the S3 URI of a dataset and\n the ECR registry path of an image. For more information, see\n Amazon SageMaker\n ML Lineage Tracking.

\n \n

\n CreateArtifact can only be invoked from within an SageMaker managed\n environment. This includes SageMaker training jobs, processing jobs, transform jobs, and SageMaker\n notebooks. A call to CreateArtifact from outside one of these\n environments results in an error.

\n
" + "smithy.api#documentation": "

Creates an artifact. An artifact is a lineage tracking entity that\n represents a URI addressable object or data. Some examples are the S3 URI of a dataset and\n the ECR registry path of an image. For more information, see\n Amazon SageMaker\n ML Lineage Tracking.

" } }, "com.amazonaws.sagemaker#CreateArtifactRequest": { @@ -4496,7 +4496,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a context. A context is a lineage tracking entity that\n represents a logical grouping of other tracking or experiment entities. Some examples are\n an endpoint and a model package. For more information, see\n Amazon SageMaker\n ML Lineage Tracking.

\n \n

\n CreateContext can only be invoked from within an SageMaker managed\n environment. This includes SageMaker training jobs, processing jobs, transform jobs, and SageMaker\n notebooks. A call to CreateContext from outside one of these\n environments results in an error.

\n
" + "smithy.api#documentation": "

Creates a context. A context is a lineage tracking entity that\n represents a logical grouping of other tracking or experiment entities. Some examples are\n an endpoint and a model package. For more information, see\n Amazon SageMaker\n ML Lineage Tracking.

" } }, "com.amazonaws.sagemaker#CreateContextRequest": { @@ -4729,7 +4729,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates a Domain used by Amazon SageMaker Studio. A domain consists of an associated\n Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application,\n policy, and Amazon Virtual Private Cloud (VPC) configurations. An Amazon Web Services account is limited to one domain per region.\n Users within a domain can share notebook files and other artifacts with each other.

\n\n

\n EFS storage\n

\n

When a domain is created, an EFS volume is created for use by all of the users within the\n domain. Each user receives a private home directory within the EFS volume for notebooks,\n Git repositories, and data files.

\n

SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with\n an Amazon Web Services managed customer master key (CMK) by default. For more control, you can specify a\n customer managed CMK. For more information, see\n Protect Data at\n Rest Using Encryption.

\n\n

\n VPC configuration\n

\n

All SageMaker Studio traffic between the domain and the EFS volume is through the specified\n VPC and subnets. For other Studio traffic, you can specify the AppNetworkAccessType\n parameter. AppNetworkAccessType corresponds to the network access type that you\n choose when you onboard to Studio. The following options are available:

\n \n \n

NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules\n in order to launch a SageMaker Studio app successfully.

\n
\n

For more information, see\n Connect\n SageMaker Studio Notebooks to Resources in a VPC.

" + "smithy.api#documentation": "

Creates a Domain used by Amazon SageMaker Studio. A domain consists of an associated\n Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application,\n policy, and Amazon Virtual Private Cloud (VPC) configurations. An Amazon Web Services account is limited to one domain per region.\n Users within a domain can share notebook files and other artifacts with each other.

\n\n

\n EFS storage\n

\n

When a domain is created, an EFS volume is created for use by all of the users within the\n domain. Each user receives a private home directory within the EFS volume for notebooks,\n Git repositories, and data files.

\n

SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with\n an Amazon Web Services managed key by default. For more control, you can specify a\n customer managed key. For more information, see\n Protect Data at\n Rest Using Encryption.

\n\n

\n VPC configuration\n

\n

All SageMaker Studio traffic between the domain and the EFS volume is through the specified\n VPC and subnets. For other Studio traffic, you can specify the AppNetworkAccessType\n parameter. AppNetworkAccessType corresponds to the network access type that you\n choose when you onboard to Studio. The following options are available:

\n \n \n

NFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules\n in order to launch a SageMaker Studio app successfully.

\n
\n

For more information, see\n Connect\n SageMaker Studio Notebooks to Resources in a VPC.

" } }, "com.amazonaws.sagemaker#CreateDomainRequest": { @@ -4794,7 +4794,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

SageMaker uses Amazon Web Services KMS to encrypt the EFS volume attached to the domain with an Amazon Web Services managed\n customer master key (CMK) by default. For more control, specify a customer managed CMK.

" + "smithy.api#documentation": "

SageMaker uses Amazon Web Services KMS to encrypt the EFS volume attached to the domain with an Amazon Web Services managed\n key by default. For more control, specify a customer managed key.

" } } } @@ -4878,7 +4878,7 @@ "ResourceKey": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

The CMK to use when encrypting the EBS volume the edge packaging job runs on.

" + "smithy.api#documentation": "

The Amazon Web Services KMS key to use when encrypting the EBS volume the edge packaging job runs on.

" } }, "Tags": { @@ -4920,7 +4920,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In\n the configuration, you identify one or more models, created using the\n CreateModel API, to deploy and the resources that you want Amazon SageMaker to\n provision. Then you call the CreateEndpoint API.

\n \n

Use this API if you want to use Amazon SageMaker hosting services to deploy models into\n production.

\n
\n

In the request, you define a ProductionVariant, for each model that you\n want to deploy. Each ProductionVariant parameter also describes the\n resources that you want Amazon SageMaker to provision. This includes the number and type of ML\n compute instances to deploy.

\n

If you are hosting multiple models, you also assign a VariantWeight to\n specify how much traffic you want to allocate to each model. For example, suppose that\n you want to host two models, A and B, and you assign traffic weight 2 for model A and 1\n for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to\n model B.

\n

For an example that calls this method when deploying a model to Amazon SageMaker hosting services,\n see Deploy the\n Model to Amazon SageMaker Hosting Services (Amazon Web Services SDK for Python (Boto\n 3)).\n

\n \n

When you call CreateEndpoint, a load call is made to DynamoDB to\n verify that your endpoint configuration exists. When you read data from a DynamoDB\n table supporting \n Eventually Consistent Reads\n , the response might not\n reflect the results of a recently completed write operation. The response might\n include some stale data. If the dependent entities are not yet in DynamoDB, this\n causes a validation error. If you repeat your read request after a short time, the\n response should return the latest data. So retry logic is recommended to handle\n these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

\n
" + "smithy.api#documentation": "

Creates an endpoint configuration that Amazon SageMaker hosting services uses to deploy models. In\n the configuration, you identify one or more models, created using the\n CreateModel API, to deploy and the resources that you want Amazon SageMaker to\n provision. Then you call the CreateEndpoint API.

\n \n

Use this API if you want to use Amazon SageMaker hosting services to deploy models into\n production.

\n
\n

In the request, you define a ProductionVariant, for each model that you\n want to deploy. Each ProductionVariant parameter also describes the\n resources that you want Amazon SageMaker to provision. This includes the number and type of ML\n compute instances to deploy.

\n

If you are hosting multiple models, you also assign a VariantWeight to\n specify how much traffic you want to allocate to each model. For example, suppose that\n you want to host two models, A and B, and you assign traffic weight 2 for model A and 1\n for model B. Amazon SageMaker distributes two-thirds of the traffic to Model A, and one-third to\n model B.

\n \n

When you call CreateEndpoint, a load call is made to DynamoDB to\n verify that your endpoint configuration exists. When you read data from a DynamoDB\n table supporting \n Eventually Consistent Reads\n , the response might not\n reflect the results of a recently completed write operation. The response might\n include some stale data. If the dependent entities are not yet in DynamoDB, this\n causes a validation error. If you repeat your read request after a short time, the\n response should return the latest data. So retry logic is recommended to handle\n these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.

\n
" } }, "com.amazonaws.sagemaker#CreateEndpointConfigInput": { @@ -6741,6 +6741,66 @@ } } }, + "com.amazonaws.sagemaker#CreateStudioLifecycleConfig": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#CreateStudioLifecycleConfigRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#CreateStudioLifecycleConfigResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceInUse" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a new Studio Lifecycle Configuration.

" + } + }, + "com.amazonaws.sagemaker#CreateStudioLifecycleConfigRequest": { + "type": "structure", + "members": { + "StudioLifecycleConfigName": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName", + "traits": { + "smithy.api#documentation": "

The name of the Studio Lifecycle Configuration to create.

", + "smithy.api#required": {} + } + }, + "StudioLifecycleConfigContent": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigContent", + "traits": { + "smithy.api#documentation": "

The content of your Studio Lifecycle Configuration script. This content must be base64 encoded.

", + "smithy.api#required": {} + } + }, + "StudioLifecycleConfigAppType": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigAppType", + "traits": { + "smithy.api#documentation": "

The App type that the Lifecycle Configuration is attached to.

", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.sagemaker#TagList", + "traits": { + "smithy.api#documentation": "

Tags to be associated with the Lifecycle Configuration. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API.

" + } + } + } + }, + "com.amazonaws.sagemaker#CreateStudioLifecycleConfigResponse": { + "type": "structure", + "members": { + "StudioLifecycleConfigArn": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigArn", + "traits": { + "smithy.api#documentation": "

The ARN of your created Lifecycle Configuration.

" + } + } + } + }, "com.amazonaws.sagemaker#CreateTrainingJob": { "type": "operation", "input": { @@ -8892,6 +8952,35 @@ } } }, + "com.amazonaws.sagemaker#DeleteStudioLifecycleConfig": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#DeleteStudioLifecycleConfigRequest" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceInUse" + }, + { + "target": "com.amazonaws.sagemaker#ResourceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes the Studio Lifecycle Configuration. In order to delete the Lifecycle Configuration, there must be no running apps using the Lifecycle Configuration. You must also remove the Lifecycle Configuration from UserSettings in all Domains and UserProfiles.

" + } + }, + "com.amazonaws.sagemaker#DeleteStudioLifecycleConfigRequest": { + "type": "structure", + "members": { + "StudioLifecycleConfigName": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName", + "traits": { + "smithy.api#documentation": "

The name of the Studio Lifecycle Configuration to delete.

", + "smithy.api#required": {} + } + } + } + }, "com.amazonaws.sagemaker#DeleteTags": { "type": "operation", "input": { @@ -10556,7 +10645,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

The Amazon Web Services KMS customer managed CMK used to encrypt\n the EFS volume attached to the domain.

" + "smithy.api#documentation": "

The Amazon Web Services KMS customer managed key used to encrypt\n the EFS volume attached to the domain.

" } } } @@ -10640,7 +10729,7 @@ "ResourceKey": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

The CMK to use when encrypting the EBS volume the job run on.

" + "smithy.api#documentation": "

The Amazon Web Services KMS key to use when encrypting the EBS volume the job run on.

" } }, "EdgePackagingJobStatus": { @@ -13460,6 +13549,76 @@ } } }, + "com.amazonaws.sagemaker#DescribeStudioLifecycleConfig": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#DescribeStudioLifecycleConfigRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#DescribeStudioLifecycleConfigResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Describes the Studio Lifecycle Configuration.

" + } + }, + "com.amazonaws.sagemaker#DescribeStudioLifecycleConfigRequest": { + "type": "structure", + "members": { + "StudioLifecycleConfigName": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName", + "traits": { + "smithy.api#documentation": "

The name of the Studio Lifecycle Configuration to describe.

", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.sagemaker#DescribeStudioLifecycleConfigResponse": { + "type": "structure", + "members": { + "StudioLifecycleConfigArn": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigArn", + "traits": { + "smithy.api#documentation": "

The ARN of the Lifecycle Configuration to describe.

" + } + }, + "StudioLifecycleConfigName": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName", + "traits": { + "smithy.api#documentation": "

The name of the Studio Lifecycle Configuration that is described.

" + } + }, + "CreationTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The creation time of the Studio Lifecycle Configuration.

" + } + }, + "LastModifiedTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

This value is equivalent to CreationTime because Studio Lifecycle Configurations are immutable.

" + } + }, + "StudioLifecycleConfigContent": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigContent", + "traits": { + "smithy.api#documentation": "

The content of your Studio Lifecycle Configuration script.

" + } + }, + "StudioLifecycleConfigAppType": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigAppType", + "traits": { + "smithy.api#documentation": "

The App type that the Lifecycle Configuration is attached to.

" + } + } + } + }, "com.amazonaws.sagemaker#DescribeSubscribedWorkteam": { "type": "operation", "input": { @@ -14111,7 +14270,7 @@ "CreatedBy": { "target": "com.amazonaws.sagemaker#UserContext", "traits": { - "smithy.api#documentation": "

Who created the component.

" + "smithy.api#documentation": "

Who created the trial component.

" } }, "LastModifiedTime": { @@ -15851,7 +16010,10 @@ } }, "CreatedBy": { - "target": "com.amazonaws.sagemaker#UserContext" + "target": "com.amazonaws.sagemaker#UserContext", + "traits": { + "smithy.api#documentation": "

Who created the experiment.

" + } }, "LastModifiedTime": { "target": "com.amazonaws.sagemaker#Timestamp", @@ -19000,6 +19162,12 @@ "traits": { "smithy.api#documentation": "

The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app.

" } + }, + "LifecycleConfigArns": { + "target": "com.amazonaws.sagemaker#LifecycleConfigArns", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Names (ARNs) of the Lifecycle Configurations attached to the JupyterServerApp.

" + } } }, "traits": { @@ -19029,6 +19197,12 @@ "traits": { "smithy.api#documentation": "

A list of custom SageMaker images that are configured to run as a KernelGateway app.

" } + }, + "LifecycleConfigArns": { + "target": "com.amazonaws.sagemaker#LifecycleConfigArns", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Names (ARNs) of the Lifecycle Configurations attached to the user profile or domain.

" + } } }, "traits": { @@ -19409,7 +19583,7 @@ "VolumeKmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume\n attached to the ML compute instance(s) that run the training and inference jobs used for\n automated data labeling.

\n

You can only specify a VolumeKmsKeyId when you create a labeling job with\n automated data labeling enabled using the API operation CreateLabelingJob.\n You cannot specify an Amazon Web Services KMS customer managed CMK to encrypt the storage volume used for\n automated data labeling model training and inference when you create a labeling job\n using the console. To learn more, see Output Data and Storage Volume\n Encryption.

\n

The VolumeKmsKeyId can be any of the following formats:

\n " + "smithy.api#documentation": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume\n attached to the ML compute instance(s) that run the training and inference jobs used for\n automated data labeling.

\n

You can only specify a VolumeKmsKeyId when you create a labeling job with\n automated data labeling enabled using the API operation CreateLabelingJob.\n You cannot specify an Amazon Web Services KMS key to encrypt the storage volume used for\n automated data labeling model training and inference when you create a labeling job\n using the console. To learn more, see Output Data and Storage Volume\n Encryption.

\n

The VolumeKmsKeyId can be any of the following formats:

\n " } } }, @@ -19625,6 +19799,12 @@ "com.amazonaws.sagemaker#LastModifiedTime": { "type": "timestamp" }, + "com.amazonaws.sagemaker#LifecycleConfigArns": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigArn" + } + }, "com.amazonaws.sagemaker#LineageEntityParameters": { "type": "map", "key": { @@ -22631,7 +22811,7 @@ "NameContains": { "target": "com.amazonaws.sagemaker#ModelNameContains", "traits": { - "smithy.api#documentation": "

A string in the training job name. This filter returns only models in the training\n job whose name contains the specified string.

" + "smithy.api#documentation": "

A string in the model name. This filter returns only models whose \n name contains the specified string.

" } }, "CreationTimeBefore": { @@ -23606,6 +23786,110 @@ } } }, + "com.amazonaws.sagemaker#ListStudioLifecycleConfigs": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#ListStudioLifecycleConfigsRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#ListStudioLifecycleConfigsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ResourceInUse" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the Studio Lifecycle Configurations in your Amazon Web Services Account.

", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.sagemaker#ListStudioLifecycleConfigsRequest": { + "type": "structure", + "members": { + "MaxResults": { + "target": "com.amazonaws.sagemaker#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of Studio Lifecycle Configurations to return in the response. The default value is 10.

" + } + }, + "NextToken": { + "target": "com.amazonaws.sagemaker#NextToken", + "traits": { + "smithy.api#documentation": "

If the previous call to ListStudioLifecycleConfigs didn't return the full set of Lifecycle Configurations, the call returns a token for getting the next set of Lifecycle Configurations.

" + } + }, + "NameContains": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName", + "traits": { + "smithy.api#documentation": "

A string in the Lifecycle Configuration name. This filter returns only Lifecycle Configurations whose name contains the specified string.

" + } + }, + "AppTypeEquals": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigAppType", + "traits": { + "smithy.api#documentation": "

A parameter to search for the App Type to which the Lifecycle Configuration is attached.

" + } + }, + "CreationTimeBefore": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

A filter that returns only Lifecycle Configurations created on or before the specified time.

" + } + }, + "CreationTimeAfter": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

A filter that returns only Lifecycle Configurations created on or after the specified time.

" + } + }, + "ModifiedTimeBefore": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

A filter that returns only Lifecycle Configurations modified before the specified time.

" + } + }, + "ModifiedTimeAfter": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

A filter that returns only Lifecycle Configurations modified after the specified time.

" + } + }, + "SortBy": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigSortKey", + "traits": { + "smithy.api#documentation": "

The property used to sort results. The default value is CreationTime.

" + } + }, + "SortOrder": { + "target": "com.amazonaws.sagemaker#SortOrder", + "traits": { + "smithy.api#documentation": "

The sort order. The default value is Descending.

" + } + } + } + }, + "com.amazonaws.sagemaker#ListStudioLifecycleConfigsResponse": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.sagemaker#NextToken", + "traits": { + "smithy.api#documentation": "

A token for getting the next set of Lifecycle Configurations, if there are any.

" + } + }, + "StudioLifecycleConfigs": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigsList", + "traits": { + "smithy.api#documentation": "

A list of Lifecycle Configurations and their properties.

" + } + } + } + }, "com.amazonaws.sagemaker#ListSubscribedWorkteams": { "type": "operation", "input": { @@ -27777,7 +28061,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using\n Amazon S3 server-side encryption. The KmsKeyId can be any of the following\n formats:

\n \n \n

If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must\n include permissions to call kms:Encrypt. If you don't provide a KMS key ID,\n Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side\n encryption with KMS-managed keys for OutputDataConfig. If you use a bucket\n policy with an s3:PutObject permission that only allows objects with\n server-side encryption, set the condition key of\n s3:x-amz-server-side-encryption to \"aws:kms\". For more\n information, see KMS-Managed Encryption\n Keys in the Amazon Simple Storage Service Developer Guide.\n

\n

The KMS key policy must grant permission to the IAM role that you specify in your\n CreateTrainingJob, CreateTransformJob, or\n CreateHyperParameterTuningJob requests. For more information, see\n Using\n Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer\n Guide.

" + "smithy.api#documentation": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using\n Amazon S3 server-side encryption. The KmsKeyId can be any of the following\n formats:

\n \n \n

If you use a KMS key ID or an alias of your KMS key, the Amazon SageMaker execution role must\n include permissions to call kms:Encrypt. If you don't provide a KMS key ID,\n Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side\n encryption with KMS-managed keys for OutputDataConfig. If you use a bucket\n policy with an s3:PutObject permission that only allows objects with\n server-side encryption, set the condition key of\n s3:x-amz-server-side-encryption to \"aws:kms\". For more\n information, see KMS-Managed Encryption\n Keys in the Amazon Simple Storage Service Developer Guide.\n

\n

The KMS key policy must grant permission to the IAM role that you specify in your\n CreateTrainingJob, CreateTransformJob, or\n CreateHyperParameterTuningJob requests. For more information, see\n Using\n Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer\n Guide.

" } }, "S3OutputPath": { @@ -29583,7 +29867,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the core dump data at rest using\n Amazon S3 server-side encryption. The KmsKeyId can be any of the following\n formats:

\n \n \n

If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must\n include permissions to call kms:Encrypt. If you don't provide a KMS key ID,\n Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side\n encryption with KMS-managed keys for OutputDataConfig. If you use a bucket\n policy with an s3:PutObject permission that only allows objects with\n server-side encryption, set the condition key of\n s3:x-amz-server-side-encryption to \"aws:kms\". For more\n information, see KMS-Managed Encryption\n Keys in the Amazon Simple Storage Service Developer Guide.\n

\n

The KMS key policy must grant permission to the IAM role that you specify in your\n CreateEndpoint and UpdateEndpoint requests. For more\n information, see Using Key Policies in Amazon Web Services\n KMS in the Amazon Web Services Key Management Service Developer Guide.

" + "smithy.api#documentation": "

The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the core dump data at rest using\n Amazon S3 server-side encryption. The KmsKeyId can be any of the following\n formats:

\n \n \n

If you use a KMS key ID or an alias of your KMS key, the Amazon SageMaker execution role must\n include permissions to call kms:Encrypt. If you don't provide a KMS key ID,\n Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side\n encryption with KMS-managed keys for OutputDataConfig. If you use a bucket\n policy with an s3:PutObject permission that only allows objects with\n server-side encryption, set the condition key of\n s3:x-amz-server-side-encryption to \"aws:kms\". For more\n information, see KMS-Managed Encryption\n Keys in the Amazon Simple Storage Service Developer Guide.\n

\n

The KMS key policy must grant permission to the IAM role that you specify in your\n CreateEndpoint and UpdateEndpoint requests. For more\n information, see Using Key Policies in Amazon Web Services\n KMS in the Amazon Web Services Key Management Service Developer Guide.

" } } }, @@ -30139,6 +30423,68 @@ ] } }, + "com.amazonaws.sagemaker#Project": { + "type": "structure", + "members": { + "ProjectArn": { + "target": "com.amazonaws.sagemaker#ProjectArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the project.

" + } + }, + "ProjectName": { + "target": "com.amazonaws.sagemaker#ProjectEntityName", + "traits": { + "smithy.api#documentation": "

The name of the project.

" + } + }, + "ProjectId": { + "target": "com.amazonaws.sagemaker#ProjectId", + "traits": { + "smithy.api#documentation": "

The ID of the project.

" + } + }, + "ProjectDescription": { + "target": "com.amazonaws.sagemaker#EntityDescription", + "traits": { + "smithy.api#documentation": "

The description of the project.

" + } + }, + "ServiceCatalogProvisioningDetails": { + "target": "com.amazonaws.sagemaker#ServiceCatalogProvisioningDetails" + }, + "ServiceCatalogProvisionedProductDetails": { + "target": "com.amazonaws.sagemaker#ServiceCatalogProvisionedProductDetails" + }, + "ProjectStatus": { + "target": "com.amazonaws.sagemaker#ProjectStatus", + "traits": { + "smithy.api#documentation": "

The status of the project.

" + } + }, + "CreatedBy": { + "target": "com.amazonaws.sagemaker#UserContext", + "traits": { + "smithy.api#documentation": "

Who created the project.

" + } + }, + "CreationTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

A timestamp specifying when the project was created.

" + } + }, + "Tags": { + "target": "com.amazonaws.sagemaker#TagList", + "traits": { + "smithy.api#documentation": "

An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in\n different ways, for example, by purpose, owner, or environment. For more information,\n see Tagging Amazon Web Services\n Resources.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

The properties of a project as returned by the Search API.

" + } + }, "com.amazonaws.sagemaker#ProjectArn": { "type": "string", "traits": { @@ -30978,6 +31324,12 @@ "traits": { "smithy.api#documentation": "

The instance type that the image version runs on.

" } + }, + "LifecycleConfigArn": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the resource.

" + } } }, "traits": { @@ -31027,6 +31379,10 @@ { "value": "FeatureGroup", "name": "FEATURE_GROUP" + }, + { + "value": "Project", + "name": "PROJECT" } ] } @@ -31076,6 +31432,60 @@ ] } }, + "com.amazonaws.sagemaker#RetryPipelineExecution": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#RetryPipelineExecutionRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#RetryPipelineExecutionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ConflictException" + }, + { + "target": "com.amazonaws.sagemaker#ResourceLimitExceeded" + }, + { + "target": "com.amazonaws.sagemaker#ResourceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

Retries the execution of the pipeline.

" + } + }, + "com.amazonaws.sagemaker#RetryPipelineExecutionRequest": { + "type": "structure", + "members": { + "PipelineExecutionArn": { + "target": "com.amazonaws.sagemaker#PipelineExecutionArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the pipeline execution.

", + "smithy.api#required": {} + } + }, + "ClientRequestToken": { + "target": "com.amazonaws.sagemaker#IdempotencyToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n operation. An idempotent operation completes no more than once.

", + "smithy.api#idempotencyToken": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.sagemaker#RetryPipelineExecutionResponse": { + "type": "structure", + "members": { + "PipelineExecutionArn": { + "target": "com.amazonaws.sagemaker#PipelineExecutionArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the pipeline execution.

" + } + } + } + }, "com.amazonaws.sagemaker#RetryStrategy": { "type": "structure", "members": { @@ -31401,6 +31811,9 @@ { "target": "com.amazonaws.sagemaker#CreateProject" }, + { + "target": "com.amazonaws.sagemaker#CreateStudioLifecycleConfig" + }, { "target": "com.amazonaws.sagemaker#CreateTrainingJob" }, @@ -31515,6 +31928,9 @@ { "target": "com.amazonaws.sagemaker#DeleteProject" }, + { + "target": "com.amazonaws.sagemaker#DeleteStudioLifecycleConfig" + }, { "target": "com.amazonaws.sagemaker#DeleteTags" }, @@ -31650,6 +32066,9 @@ { "target": "com.amazonaws.sagemaker#DescribeProject" }, + { + "target": "com.amazonaws.sagemaker#DescribeStudioLifecycleConfig" + }, { "target": "com.amazonaws.sagemaker#DescribeSubscribedWorkteam" }, @@ -31824,6 +32243,9 @@ { "target": "com.amazonaws.sagemaker#ListProjects" }, + { + "target": "com.amazonaws.sagemaker#ListStudioLifecycleConfigs" + }, { "target": "com.amazonaws.sagemaker#ListSubscribedWorkteams" }, @@ -31863,6 +32285,9 @@ { "target": "com.amazonaws.sagemaker#RenderUiTemplate" }, + { + "target": "com.amazonaws.sagemaker#RetryPipelineExecution" + }, { "target": "com.amazonaws.sagemaker#Search" }, @@ -32183,6 +32608,12 @@ }, "FeatureGroup": { "target": "com.amazonaws.sagemaker#FeatureGroup" + }, + "Project": { + "target": "com.amazonaws.sagemaker#Project", + "traits": { + "smithy.api#documentation": "

The properties of a project.

" + } } }, "traits": { @@ -32589,7 +33020,7 @@ } }, "traits": { - "smithy.api#documentation": "

Details that you specify to provision a service catalog product. For information about\n service catalog, see .What is Amazon Web Services Service\n Catalog.

" + "smithy.api#documentation": "

Details that you specify to provision a service catalog product. For information about\n service catalog, see What is Amazon Web Services Service\n Catalog.

" } }, "com.amazonaws.sagemaker#SessionExpirationDurationInSeconds": { @@ -33044,7 +33475,7 @@ "ClientRequestToken": { "target": "com.amazonaws.sagemaker#IdempotencyToken", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n operation. An idempotent operation completes no more than one time.

", + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n operation. An idempotent operation completes no more than once.

", "smithy.api#idempotencyToken": {}, "smithy.api#required": {} } @@ -33318,7 +33749,7 @@ "ClientRequestToken": { "target": "com.amazonaws.sagemaker#IdempotencyToken", "traits": { - "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n operation. An idempotent operation completes no more than one time.

", + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n operation. An idempotent operation completes no more than once.

", "smithy.api#idempotencyToken": {}, "smithy.api#required": {} } @@ -33493,6 +33924,114 @@ "smithy.api#pattern": ".*" } }, + "com.amazonaws.sagemaker#StudioLifecycleConfigAppType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "JupyterServer", + "name": "JupyterServer" + }, + { + "value": "KernelGateway", + "name": "KernelGateway" + } + ] + } + }, + "com.amazonaws.sagemaker#StudioLifecycleConfigArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:studio-lifecycle-config/" + } + }, + "com.amazonaws.sagemaker#StudioLifecycleConfigContent": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 16384 + }, + "smithy.api#pattern": "^[\\S\\s]+$" + } + }, + "com.amazonaws.sagemaker#StudioLifecycleConfigDetails": { + "type": "structure", + "members": { + "StudioLifecycleConfigArn": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Lifecycle Configuration.

" + } + }, + "StudioLifecycleConfigName": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigName", + "traits": { + "smithy.api#documentation": "

The name of the Studio Lifecycle Configuration.

" + } + }, + "CreationTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The creation time of the Studio Lifecycle Configuration.

" + } + }, + "LastModifiedTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "

This value is equivalent to CreationTime because Studio Lifecycle Configurations are immutable.

" + } + }, + "StudioLifecycleConfigAppType": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigAppType", + "traits": { + "smithy.api#documentation": "

The App type to which the Lifecycle Configuration is attached.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Details of the Studio Lifecycle Configuration.

" + } + }, + "com.amazonaws.sagemaker#StudioLifecycleConfigName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 63 + }, + "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}$" + } + }, + "com.amazonaws.sagemaker#StudioLifecycleConfigSortKey": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CreationTime", + "name": "CreationTime" + }, + { + "value": "LastModifiedTime", + "name": "LastModifiedTime" + }, + { + "value": "Name", + "name": "Name" + } + ] + } + }, + "com.amazonaws.sagemaker#StudioLifecycleConfigsList": { + "type": "list", + "member": { + "target": "com.amazonaws.sagemaker#StudioLifecycleConfigDetails" + } + }, "com.amazonaws.sagemaker#SubnetId": { "type": "string", "traits": { @@ -33794,6 +34333,10 @@ { "value": "jacinto_tda4vm", "name": "JACINTO_TDA4VM" + }, + { + "value": "imx8mplus", + "name": "IMX8MPLUS" } ] } @@ -35519,7 +36062,10 @@ } }, "CreatedBy": { - "target": "com.amazonaws.sagemaker#UserContext" + "target": "com.amazonaws.sagemaker#UserContext", + "traits": { + "smithy.api#documentation": "

Who created the trial.

" + } }, "LastModifiedTime": { "target": "com.amazonaws.sagemaker#Timestamp", @@ -35609,7 +36155,10 @@ } }, "CreatedBy": { - "target": "com.amazonaws.sagemaker#UserContext" + "target": "com.amazonaws.sagemaker#UserContext", + "traits": { + "smithy.api#documentation": "

Who created the trial component.

" + } }, "LastModifiedTime": { "target": "com.amazonaws.sagemaker#Timestamp", @@ -36064,7 +36613,7 @@ "CreatedBy": { "target": "com.amazonaws.sagemaker#UserContext", "traits": { - "smithy.api#documentation": "

Who created the component.

" + "smithy.api#documentation": "

Who created the trial component.

" } }, "LastModifiedTime": { @@ -37686,7 +38235,7 @@ } }, "traits": { - "smithy.api#documentation": "

Information about the user who created or modified an experiment, trial, or trial\n component.

" + "smithy.api#documentation": "

Information about the user who created or modified an experiment, trial, trial\n component, or project.

" } }, "com.amazonaws.sagemaker#UserProfileArn": { diff --git a/codegen/sdk-codegen/aws-models/transcribe.2017-10-26.json b/codegen/sdk-codegen/aws-models/transcribe.2017-10-26.json index 02a3ea6db08..19348a5f836 100644 --- a/codegen/sdk-codegen/aws-models/transcribe.2017-10-26.json +++ b/codegen/sdk-codegen/aws-models/transcribe.2017-10-26.json @@ -1886,7 +1886,7 @@ "AllowDeferredExecution": { "target": "com.amazonaws.transcribe#Boolean", "traits": { - "smithy.api#documentation": "

Indicates whether a job should be queued by Amazon Transcribe when the concurrent execution limit is exceeded. When the\n AllowDeferredExecution field is true, jobs are queued and executed when the number of executing\n jobs falls below the concurrent execution limit. If the field is false, Amazon Transcribe returns a LimitExceededException\n exception.

\n

If you specify the AllowDeferredExecution field, you must specify the \n DataAccessRoleArn field.

" + "smithy.api#documentation": "

Indicates whether a job should be queued by Amazon Transcribe when the concurrent execution limit is exceeded. When the\n AllowDeferredExecution field is true, jobs are queued and executed when the number of executing\n jobs falls below the concurrent execution limit. If the field is false, Amazon Transcribe returns a \n LimitExceededException exception.

\n

Note that job queuing is enabled by default for call analytics jobs.

\n

If you specify the AllowDeferredExecution field, you must specify the \n DataAccessRoleArn field.

" } }, "DataAccessRoleArn": { @@ -1900,6 +1900,21 @@ "smithy.api#documentation": "

Provides information about when a transcription job should be executed.

" } }, + "com.amazonaws.transcribe#KMSEncryptionContextMap": { + "type": "map", + "key": { + "target": "com.amazonaws.transcribe#NonEmptyString" + }, + "value": { + "target": "com.amazonaws.transcribe#NonEmptyString" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, "com.amazonaws.transcribe#KMSKeyId": { "type": "string", "traits": { @@ -3852,6 +3867,12 @@ "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Web Services Key Management Service (KMS) key used to \n encrypt the output of the transcription job. The user calling the StartMedicalTranscriptionJob \n operation must have permission to use the specified KMS key.

\n

You use either of the following to identify a KMS key in the current account:

\n \n

You can use either of the following to identify a KMS key in the current account or another account:

\n \n

If you don't specify an encryption key, the output of the medical transcription job is encrypted with the default Amazon S3 \n key (SSE-S3).

\n

If you specify a KMS key to encrypt your output, you must also specify an output location in the \n OutputBucketName parameter.

" } }, + "KMSEncryptionContext": { + "target": "com.amazonaws.transcribe#KMSEncryptionContextMap", + "traits": { + "smithy.api#documentation": "

A map of plain text, non-secret key:value pairs, known as encryption context pairs, that provide an added\n layer of security for your data.

" + } + }, "Settings": { "target": "com.amazonaws.transcribe#MedicalTranscriptionSetting", "traits": { @@ -3982,6 +4003,12 @@ "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Amazon Web Services Key Management Service (KMS) key used to \n encrypt the output of the transcription job. The user calling the StartTranscriptionJob \n operation must have permission to use the specified KMS key.

\n

You can use either of the following to identify a KMS key in the current account:

\n \n

You can use either of the following to identify a KMS key in the current account or another account:

\n \n

If you don't specify an encryption key, the output of the transcription job is encrypted with the default \n Amazon S3 key (SSE-S3).

\n

If you specify a KMS key to encrypt your output, you must also specify an output location in the \n OutputBucketName parameter.

" } }, + "KMSEncryptionContext": { + "target": "com.amazonaws.transcribe#KMSEncryptionContextMap", + "traits": { + "smithy.api#documentation": "

A map of plain text, non-secret key:value pairs, known as encryption context pairs, that provide an added\n layer of security for your data.

" + } + }, "Settings": { "target": "com.amazonaws.transcribe#Settings", "traits": { @@ -4018,6 +4045,12 @@ "smithy.api#documentation": "

An object containing a list of languages that might be present in your collection of audio files. Automatic language\n identification chooses a language that best matches the source audio from that list.

\n

To transcribe speech in Modern Standard Arabic (ar-SA), your audio or video file must be encoded at a sample \n rate of 16,000 Hz or higher.

" } }, + "Subtitles": { + "target": "com.amazonaws.transcribe#Subtitles", + "traits": { + "smithy.api#documentation": "

Add subtitles to your batch transcription job.

" + } + }, "Tags": { "target": "com.amazonaws.transcribe#TagList", "traits": { @@ -4051,6 +4084,67 @@ } } }, + "com.amazonaws.transcribe#SubtitleFileUris": { + "type": "list", + "member": { + "target": "com.amazonaws.transcribe#Uri" + } + }, + "com.amazonaws.transcribe#SubtitleFormat": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "vtt", + "name": "VTT" + }, + { + "value": "srt", + "name": "SRT" + } + ] + } + }, + "com.amazonaws.transcribe#SubtitleFormats": { + "type": "list", + "member": { + "target": "com.amazonaws.transcribe#SubtitleFormat" + } + }, + "com.amazonaws.transcribe#Subtitles": { + "type": "structure", + "members": { + "Formats": { + "target": "com.amazonaws.transcribe#SubtitleFormats", + "traits": { + "smithy.api#documentation": "

Specify the output format for your subtitle file.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Generate subtitles for your batch transcription job.

" + } + }, + "com.amazonaws.transcribe#SubtitlesOutput": { + "type": "structure", + "members": { + "Formats": { + "target": "com.amazonaws.transcribe#SubtitleFormats", + "traits": { + "smithy.api#documentation": "

Specify the output format for your subtitle file; if you select both SRT and VTT formats, two output files are generated.

" + } + }, + "SubtitleFileUris": { + "target": "com.amazonaws.transcribe#SubtitleFileUris", + "traits": { + "smithy.api#documentation": "

Choose the output location for your subtitle file. This location must be an S3 bucket.

" + } + } + }, + "traits": { + "smithy.api#documentation": "

Specify the output format for your subtitle file.

" + } + }, "com.amazonaws.transcribe#Tag": { "type": "structure", "members": { @@ -4524,6 +4618,12 @@ "traits": { "smithy.api#documentation": "

A key:value pair assigned to a given transcription job.

" } + }, + "Subtitles": { + "target": "com.amazonaws.transcribe#SubtitlesOutput", + "traits": { + "smithy.api#documentation": "

Generate subtitles for your batch transcription job.

" + } } }, "traits": { diff --git a/codegen/sdk-codegen/aws-models/wafv2.2019-07-29.json b/codegen/sdk-codegen/aws-models/wafv2.2019-07-29.json index e0e1e569647..d94bd6650ca 100644 --- a/codegen/sdk-codegen/aws-models/wafv2.2019-07-29.json +++ b/codegen/sdk-codegen/aws-models/wafv2.2019-07-29.json @@ -229,14 +229,14 @@ "type": "structure", "members": {}, "traits": { - "smithy.api#documentation": "

Inspect all of the elements that WAF has parsed and extracted from the web request\n JSON body that are within the JsonBody\n MatchScope. This is used with the FieldToMatch option\n JsonBody.\n \n

\n

This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.

" + "smithy.api#documentation": "

Inspect all of the elements that WAF has parsed and extracted from the web request\n JSON body that are within the JsonBody\n MatchScope. This is used with the FieldToMatch option\n JsonBody.\n \n

\n

This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.

\n

JSON specification: \"All\": {}\n

" } }, "com.amazonaws.wafv2#AllQueryArguments": { "type": "structure", "members": {}, "traits": { - "smithy.api#documentation": "

All query arguments of a web request.

\n

This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.

" + "smithy.api#documentation": "

All query arguments of a web request.

\n

This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.

\n

JSON specification: \"AllQueryArguments\": {}\n

" } }, "com.amazonaws.wafv2#AllowAction": { @@ -338,7 +338,7 @@ "type": "structure", "members": {}, "traits": { - "smithy.api#documentation": "

The body of a web request. This immediately follows the request headers.

\n

This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.

" + "smithy.api#documentation": "

The body of a web request. This immediately follows the request headers.

\n

This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.

\n

JSON specification: \"Body\": {}\n

" } }, "com.amazonaws.wafv2#BodyParsingFallbackBehavior": { @@ -2862,7 +2862,7 @@ } }, "traits": { - "smithy.api#documentation": "

The part of a web request that you want WAF to inspect. Include the single\n FieldToMatch type that you want to inspect, with additional specifications\n as needed, according to the type. You specify a single request component in\n FieldToMatch for each rule statement that requires it. To inspect more than\n one component of a web request, create a separate rule statement for each component.

" + "smithy.api#documentation": "

The part of a web request that you want WAF to inspect. Include the single FieldToMatch type that you want to inspect, with additional specifications as needed, according to the type. You specify a single request component in FieldToMatch for each rule statement that requires it. To inspect more than one component of a web request, create a separate rule statement for each component.

\n

JSON specification for a QueryString field to match:

\n

\n \"FieldToMatch\": { \"QueryString\": {} }\n

\n

Example JSON for a Method field to match specification:

\n

\n \"FieldToMatch\": { \"Method\": { \"Name\": \"DELETE\" } }\n

" } }, "com.amazonaws.wafv2#FieldToMatchData": { @@ -3006,7 +3006,7 @@ "RuleGroupReferenceStatement": { "target": "com.amazonaws.wafv2#RuleGroupReferenceStatement", "traits": { - "smithy.api#documentation": "

A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.

\n

You cannot nest a RuleGroupReferenceStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

" + "smithy.api#documentation": "

A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.

\n

You cannot nest a RuleGroupReferenceStatement, for example for use inside a NotStatement or OrStatement. You \n can only use a rule group reference statement at the top level inside a web ACL.

" } } }, @@ -3226,7 +3226,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the specified managed rule set.

\n \n

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.

\n

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

\n
" + "smithy.api#documentation": "

Retrieves the specified managed rule set.

\n \n

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.

\n

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

\n
" } }, "com.amazonaws.wafv2#GetManagedRuleSetRequest": { @@ -3341,7 +3341,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the keys that are currently blocked by a rate-based rule. The maximum number\n of managed keys that can be blocked for a single rate-based rule is 10,000. If more than\n 10,000 addresses exceed the rate limit, those with the highest rates are blocked.

" + "smithy.api#documentation": "

Retrieves the keys that are currently blocked by a rate-based rule instance. The maximum number of managed keys that can be blocked for a single rate-based rule instance is 10,000. If more than 10,000 addresses exceed the rate limit, those with the highest rates are blocked.

\n

For a rate-based rule that you've defined inside a rule group, provide the name of the rule group reference statement in your request, in addition to the rate-based rule name and the web ACL name.

\n

WAF monitors web requests and manages keys independently for each unique combination of web ACL, optional rule group, and rate-based rule. For example, if you define a rate-based rule inside a rule group, and then use the rule group in a web ACL, WAF monitors web requests and manages keys for that web ACL, rule group reference statement, and rate-based rule instance. If you use the same rule group in a second web ACL, WAF monitors web requests and manages keys for this second usage completely independent of your first.

" } }, "com.amazonaws.wafv2#GetRateBasedStatementManagedKeysRequest": { @@ -3368,10 +3368,16 @@ "smithy.api#required": {} } }, + "RuleGroupRuleName": { + "target": "com.amazonaws.wafv2#EntityName", + "traits": { + "smithy.api#documentation": "

The name of the rule group reference statement in your web ACL. This is required only when you have the rate-based rule nested \ninside a rule group.

" + } + }, "RuleName": { "target": "com.amazonaws.wafv2#EntityName", "traits": { - "smithy.api#documentation": "

The name of the rate-based rule to get the keys for.

", + "smithy.api#documentation": "

The name of the rate-based rule to get the keys for. If you have the rule defined inside a rule group that you're using in your web ACL, also provide the name of the rule group reference statement in the request parameter RuleGroupRuleName.

", "smithy.api#required": {} } } @@ -4026,7 +4032,7 @@ } }, "traits": { - "smithy.api#documentation": "

The body of a web request, inspected as JSON. The body immediately follows the request\n headers. This is used in the FieldToMatch specification.

\n

Use the specifications in this object to indicate which parts of the JSON body to\n inspect using the rule's inspection criteria. WAF inspects only the parts of the JSON\n that result from the matches that you\n indicate.\n

" + "smithy.api#documentation": "

The body of a web request, inspected as JSON. The body immediately follows the request\n headers. This is used in the FieldToMatch specification.

\n

Use the specifications in this object to indicate which parts of the JSON body to\n inspect using the rule's inspection criteria. WAF inspects only the parts of the JSON\n that result from the matches that you\n indicate.\n

\n

Example JSON: \"JsonBody\": { \"MatchPattern\": { \"All\": {} }, \"MatchScope\": \"ALL\" }\n

" } }, "com.amazonaws.wafv2#JsonMatchPattern": { @@ -4300,7 +4306,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves an array of managed rule groups that are available for you to use. This list\n includes all Amazon Web Services Managed Rules rule groups and all of the Marketplace managed rule groups that you're\n subscribed to.

" + "smithy.api#documentation": "

Retrieves an array of managed rule groups that are available for you to use. This list\n includes all Amazon Web Services Managed Rules rule groups and all of the Amazon Web Services Marketplace managed rule groups that you're\n subscribed to.

" } }, "com.amazonaws.wafv2#ListAvailableManagedRuleGroupsRequest": { @@ -4491,7 +4497,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the managed rule sets that you own.

\n \n

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.

\n

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

\n
" + "smithy.api#documentation": "

Retrieves the managed rule sets that you own.

\n \n

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.

\n

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

\n
" } }, "com.amazonaws.wafv2#ListManagedRuleSetsRequest": { @@ -4906,7 +4912,7 @@ "RedactedFields": { "target": "com.amazonaws.wafv2#RedactedFields", "traits": { - "smithy.api#documentation": "

The parts of the request that you want to keep out of the logs. For example, if you\n redact the HEADER field, the HEADER field in the firehose will be\n xxx.

\n \n

You must use one of the following values: URI,\n QUERY_STRING, HEADER, or METHOD.

\n
" + "smithy.api#documentation": "

The parts of the request that you want to keep out of the logs. For\n example, if you redact the SingleHeader field, the HEADER field in the firehose will be xxx.

\n \n

You can specify only the following fields for redaction: UriPath, QueryString, SingleHeader, Method, and JsonBody.

\n
" } }, "ManagedByFirewallManager": { @@ -5018,12 +5024,12 @@ "Description": { "target": "com.amazonaws.wafv2#EntityDescription", "traits": { - "smithy.api#documentation": "

The description of the managed rule group, provided by Amazon Web Services Managed Rules or the Marketplace seller who manages it.

" + "smithy.api#documentation": "

The description of the managed rule group, provided by Amazon Web Services Managed Rules or the Amazon Web Services Marketplace seller who manages it.

" } } }, "traits": { - "smithy.api#documentation": "

High-level information about a managed rule group, returned by ListAvailableManagedRuleGroups. This provides information like the name and vendor name, that you provide when you add a ManagedRuleGroupStatement to a web ACL. Managed rule groups include Amazon Web Services Managed Rules rule groups, which are free of charge to WAF customers, and Marketplace managed rule groups, which you can subscribe to through Marketplace.

" + "smithy.api#documentation": "

High-level information about a managed rule group, returned by ListAvailableManagedRuleGroups. This provides information like the name and vendor name, that you provide when you add a ManagedRuleGroupStatement to a web ACL. Managed rule groups include Amazon Web Services Managed Rules rule groups, which are free of charge to WAF customers, and Amazon Web Services Marketplace managed rule groups, which you can subscribe to through Amazon Web Services Marketplace.

" } }, "com.amazonaws.wafv2#ManagedRuleGroupVersion": { @@ -5102,7 +5108,7 @@ } }, "traits": { - "smithy.api#documentation": "

A set of rules that is managed by Amazon Web Services and Marketplace sellers to provide versioned managed\n rule groups for customers of WAF.

\n \n

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.

\n

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

\n
" + "smithy.api#documentation": "

A set of rules that is managed by Amazon Web Services and Amazon Web Services Marketplace sellers to provide versioned managed\n rule groups for customers of WAF.

\n \n

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.

\n

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

\n
" } }, "com.amazonaws.wafv2#ManagedRuleSetSummaries": { @@ -5152,7 +5158,7 @@ } }, "traits": { - "smithy.api#documentation": "

High-level information for a managed rule set.

\n \n

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.

\n

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

\n
" + "smithy.api#documentation": "

High-level information for a managed rule set.

\n \n

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.

\n

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

\n
" } }, "com.amazonaws.wafv2#ManagedRuleSetVersion": { @@ -5196,14 +5202,14 @@ } }, "traits": { - "smithy.api#documentation": "

Information for a single version of a managed rule set.

\n \n

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.

\n

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

\n
" + "smithy.api#documentation": "

Information for a single version of a managed rule set.

\n \n

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.

\n

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

\n
" } }, "com.amazonaws.wafv2#Method": { "type": "structure", "members": {}, "traits": { - "smithy.api#documentation": "

The HTTP method of a web request. The method indicates the type of operation that the\n request is asking the origin to perform.

\n

This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.

" + "smithy.api#documentation": "

The HTTP method of a web request. The method indicates the type of operation that the request is asking the origin to perform.

\n

This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.

\n

JSON specification: \"Method\": {}\n

" } }, "com.amazonaws.wafv2#MetricName": { @@ -5230,7 +5236,7 @@ "type": "structure", "members": {}, "traits": { - "smithy.api#documentation": "

Specifies that WAF should do nothing. This is generally used to try out a rule\n without performing any actions. You set the OverrideAction on the Rule.

\n

This is used in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.

" + "smithy.api#documentation": "

Specifies that WAF should do nothing. This is generally used to try out a rule\n without performing any actions. You set the OverrideAction on the Rule.

\n

This is used in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.

\n

JSON specification: \"None\": {}\n

" } }, "com.amazonaws.wafv2#NotStatement": { @@ -5508,6 +5514,18 @@ { "value": "FILTER_CONDITION", "name": "FILTER_CONDITION" + }, + { + "value": "EXPIRE_TIMESTAMP", + "name": "EXPIRE_TIMESTAMP" + }, + { + "value": "CHANGE_PROPAGATION_STATUS", + "name": "CHANGE_PROPAGATION_STATUS" + }, + { + "value": "ASSOCIABLE_RESOURCE", + "name": "ASSOCIABLE_RESOURCE" } ] } @@ -5653,7 +5671,7 @@ } ], "traits": { - "smithy.api#documentation": "

Defines the versions of your managed rule set that you are offering to the customers.\n Customers see your offerings as managed rule groups with versioning.

\n \n

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.

\n

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

\n
\n

Customers retrieve their managed rule group list by calling ListAvailableManagedRuleGroups. The name that you provide here for your\n managed rule set is the name the customer sees for the corresponding managed rule group.\n Customers can retrieve the available versions for a managed rule group by calling ListAvailableManagedRuleGroupVersions. You provide a rule group\n specification for each version. For each managed rule set, you must specify a version that\n you recommend using.

\n

To initiate the expiration of a managed rule group version, use UpdateManagedRuleSetVersionExpiryDate.

" + "smithy.api#documentation": "

Defines the versions of your managed rule set that you are offering to the customers.\n Customers see your offerings as managed rule groups with versioning.

\n \n

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.

\n

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

\n
\n

Customers retrieve their managed rule group list by calling ListAvailableManagedRuleGroups. The name that you provide here for your\n managed rule set is the name the customer sees for the corresponding managed rule group.\n Customers can retrieve the available versions for a managed rule group by calling ListAvailableManagedRuleGroupVersions. You provide a rule group\n specification for each version. For each managed rule set, you must specify a version that\n you recommend using.

\n

To initiate the expiration of a managed rule group version, use UpdateManagedRuleSetVersionExpiryDate.

" } }, "com.amazonaws.wafv2#PutManagedRuleSetVersionsRequest": { @@ -5765,7 +5783,7 @@ "type": "structure", "members": {}, "traits": { - "smithy.api#documentation": "

The query string of a web request. This is the part of a URL that appears after a\n ? character, if any.

\n

This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.

" + "smithy.api#documentation": "

The query string of a web request. This is the part of a URL that appears after a ? character, if any.

\n

This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.

\n

JSON specification: \"QueryString\": {}\n

" } }, "com.amazonaws.wafv2#RateBasedStatement": { @@ -5799,7 +5817,7 @@ } }, "traits": { - "smithy.api#documentation": "

A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.

\n

When the rule action triggers, WAF blocks additional requests from the IP address until the request rate falls below the limit.

\n

You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:

\n \n

In this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.

\n

You cannot nest a RateBasedStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

" + "smithy.api#documentation": "

A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.

\n

WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by WAF. If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by WAF.

\n

When the rule action triggers, WAF blocks additional requests from the IP address until the request rate falls below the limit.

\n

You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:

\n \n

In this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.

\n

You cannot nest a RateBasedStatement inside another statement, for example inside a NotStatement or OrStatement. You can define a RateBasedStatement inside a web ACL and inside a rule group.

" } }, "com.amazonaws.wafv2#RateBasedStatementAggregateKeyType": { @@ -5834,7 +5852,7 @@ } }, "traits": { - "smithy.api#documentation": "

The set of IP addresses that are currently blocked for a rate-based statement.

" + "smithy.api#documentation": "

The set of IP addresses that are currently blocked for a RateBasedStatement.

" } }, "com.amazonaws.wafv2#RateLimit": { @@ -6250,7 +6268,7 @@ } }, "traits": { - "smithy.api#documentation": "

A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.

\n

You cannot nest a RuleGroupReferenceStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

" + "smithy.api#documentation": "

A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.

\n

You cannot nest a RuleGroupReferenceStatement, for example for use inside a NotStatement or OrStatement. You \n can only use a rule group reference statement at the top level inside a web ACL.

" } }, "com.amazonaws.wafv2#RuleGroupSummaries": { @@ -6439,7 +6457,7 @@ } }, "traits": { - "smithy.api#documentation": "

One of the headers in a web request, identified by name, for example,\n User-Agent or Referer. This setting isn't case\n sensitive.

\n

This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.

" + "smithy.api#documentation": "

One of the headers in a web request, identified by name, for example,\n User-Agent or Referer. This setting isn't case\n sensitive.

\n

This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.

\n

Example JSON: \"SingleHeader\": { \"Name\": \"haystack\" }\n

" } }, "com.amazonaws.wafv2#SingleQueryArgument": { @@ -6454,7 +6472,7 @@ } }, "traits": { - "smithy.api#documentation": "

One query argument in a web request, identified by name, for example\n UserName or SalesRegion. The name can be up to\n 30 characters long and isn't case sensitive.

" + "smithy.api#documentation": "

One query argument in a web request, identified by name, for example\n UserName or SalesRegion. The name can be up to\n 30 characters long and isn't case sensitive.

\n

Example JSON: \"SingleQueryArgument\": { \"Name\": \"myArgument\" }\n

" } }, "com.amazonaws.wafv2#Size": { @@ -6560,7 +6578,7 @@ "RuleGroupReferenceStatement": { "target": "com.amazonaws.wafv2#RuleGroupReferenceStatement", "traits": { - "smithy.api#documentation": "

A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.

\n

You cannot nest a RuleGroupReferenceStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

" + "smithy.api#documentation": "

A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.

\n

You cannot nest a RuleGroupReferenceStatement, for example for use inside a NotStatement or OrStatement. You \n can only use a rule group reference statement at the top level inside a web ACL.

" } }, "IPSetReferenceStatement": { @@ -6578,7 +6596,7 @@ "RateBasedStatement": { "target": "com.amazonaws.wafv2#RateBasedStatement", "traits": { - "smithy.api#documentation": "

A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.

\n

When the rule action triggers, WAF blocks additional requests from the IP address until the request rate falls below the limit.

\n

You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:

\n \n

In this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.

\n

You cannot nest a RateBasedStatement, for example for use inside a NotStatement or OrStatement. It can only be referenced as a top-level statement within a rule.

" + "smithy.api#documentation": "

A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.

\n

WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by WAF. If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by WAF.

\n

When the rule action triggers, WAF blocks additional requests from the IP address until the request rate falls below the limit.

\n

You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:

\n \n

In this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.

\n

You cannot nest a RateBasedStatement inside another statement, for example inside a NotStatement or OrStatement. You can define a RateBasedStatement inside a web ACL and inside a rule group.

" } }, "AndStatement": { @@ -6613,7 +6631,7 @@ } }, "traits": { - "smithy.api#documentation": "

The processing guidance for a Rule, used by WAF to determine\n whether a web request matches the rule.

" + "smithy.api#documentation": "

The processing guidance for a Rule, used by WAF to determine whether a web request matches the rule.

" } }, "com.amazonaws.wafv2#Statements": { @@ -7106,7 +7124,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the expiration information for your managed rule set. Use this to initiate the\n expiration of a managed rule group version. After you initiate expiration for a version,\n WAF excludes it from the reponse to ListAvailableManagedRuleGroupVersions for the managed rule group.

\n \n

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.

\n

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

\n
" + "smithy.api#documentation": "

Updates the expiration information for your managed rule set. Use this to initiate the\n expiration of a managed rule group version. After you initiate expiration for a version,\n WAF excludes it from the response to ListAvailableManagedRuleGroupVersions for the managed rule group.

\n \n

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.

\n

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

\n
" } }, "com.amazonaws.wafv2#UpdateManagedRuleSetVersionExpiryDateRequest": { @@ -7508,7 +7526,7 @@ "type": "structure", "members": {}, "traits": { - "smithy.api#documentation": "

The path component of the URI of a web request. This is the part of a web request that identifies a resource. For example, /images/daily-ad.jpg.

\n

This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.

" + "smithy.api#documentation": "

The path component of the URI of a web request. This is the part of a web request that identifies a resource. For example, /images/daily-ad.jpg.

\n

This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.

\n

JSON specification: \"UriPath\": {}\n

" } }, "com.amazonaws.wafv2#VendorName": { @@ -7548,7 +7566,7 @@ } }, "traits": { - "smithy.api#documentation": "

A version of the named managed rule group, that the rule group's vendor publishes for\n use by customers.

\n \n

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.

\n

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

\n
" + "smithy.api#documentation": "

A version of the named managed rule group, that the rule group's vendor publishes for\n use by customers.

\n \n

This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.

\n

Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate.

\n
" } }, "com.amazonaws.wafv2#VersionsToPublish": { diff --git a/codegen/smithy-aws-go-codegen/src/main/resources/software/amazon/smithy/aws/go/codegen/endpoints.json b/codegen/smithy-aws-go-codegen/src/main/resources/software/amazon/smithy/aws/go/codegen/endpoints.json index 66353167575..f8e122bf07a 100644 --- a/codegen/smithy-aws-go-codegen/src/main/resources/software/amazon/smithy/aws/go/codegen/endpoints.json +++ b/codegen/smithy-aws-go-codegen/src/main/resources/software/amazon/smithy/aws/go/codegen/endpoints.json @@ -4551,6 +4551,25 @@ "us-west-2" : { } } }, + "mediapackage-vod" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "mediastore" : { "endpoints" : { "ap-northeast-1" : { }, @@ -4621,6 +4640,19 @@ "us-east-1" : { } } }, + "models-v2-lex" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, "models.lex" : { "defaults" : { "credentialScope" : { @@ -4871,6 +4903,61 @@ } } }, + "network-firewall" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "network-firewall-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "network-firewall-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "network-firewall-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "network-firewall-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "network-firewall-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "oidc" : { "endpoints" : { "ap-northeast-1" : { @@ -5616,6 +5703,19 @@ "us-west-2" : { } } }, + "runtime-v2-lex" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, "runtime.lex" : { "defaults" : { "credentialScope" : { @@ -7140,6 +7240,30 @@ "eu-west-1" : { }, "eu-west-2" : { }, "sa-east-1" : { }, + "transcribestreaming-fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "transcribestreaming-fips.ca-central-1.amazonaws.com" + }, + "transcribestreaming-fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "transcribestreaming-fips.us-east-1.amazonaws.com" + }, + "transcribestreaming-fips-us-east-2" : { + 
"credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "transcribestreaming-fips.us-east-2.amazonaws.com" + }, + "transcribestreaming-fips-us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "transcribestreaming-fips.us-west-2.amazonaws.com" + }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -9686,6 +9810,34 @@ } } }, + "network-firewall" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "network-firewall-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "network-firewall-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, + "oidc" : { + "endpoints" : { + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "oidc.us-gov-west-1.amazonaws.com" + } + } + }, "organizations" : { "endpoints" : { "aws-us-gov-global" : { diff --git a/service/chime/api_op_CreateSipMediaApplicationCall.go b/service/chime/api_op_CreateSipMediaApplicationCall.go index ccff84c9d8e..7b1158b5068 100644 --- a/service/chime/api_op_CreateSipMediaApplicationCall.go +++ b/service/chime/api_op_CreateSipMediaApplicationCall.go @@ -46,6 +46,9 @@ type CreateSipMediaApplicationCallInput struct { // This member is required. ToPhoneNumber *string + // The SIP headers added to an outbound call leg. + SipHeaders map[string]string + noSmithyDocumentSerde } diff --git a/service/chime/api_op_StartMeetingTranscription.go b/service/chime/api_op_StartMeetingTranscription.go index f5114639564..179198df21e 100644 --- a/service/chime/api_op_StartMeetingTranscription.go +++ b/service/chime/api_op_StartMeetingTranscription.go @@ -11,7 +11,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Start transcription for the specified meetingId. +// Starts transcription for the specified meetingId. 
func (c *Client) StartMeetingTranscription(ctx context.Context, params *StartMeetingTranscriptionInput, optFns ...func(*Options)) (*StartMeetingTranscriptionOutput, error) { if params == nil { params = &StartMeetingTranscriptionInput{} diff --git a/service/chime/deserializers.go b/service/chime/deserializers.go index b253f3d410a..f7926a9bd9a 100644 --- a/service/chime/deserializers.go +++ b/service/chime/deserializers.go @@ -5231,6 +5231,9 @@ func awsRestjson1_deserializeOpErrorCreateSipMediaApplicationCall(response *smit } switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + case strings.EqualFold("BadRequestException", errorCode): return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) diff --git a/service/chime/serializers.go b/service/chime/serializers.go index 558f25428f3..e8d905fcd4c 100644 --- a/service/chime/serializers.go +++ b/service/chime/serializers.go @@ -2807,6 +2807,13 @@ func awsRestjson1_serializeOpDocumentCreateSipMediaApplicationCallInput(v *Creat ok.String(*v.FromPhoneNumber) } + if v.SipHeaders != nil { + ok := object.Key("SipHeaders") + if err := awsRestjson1_serializeDocumentSipHeadersMap(v.SipHeaders, ok); err != nil { + return err + } + } + if v.ToPhoneNumber != nil { ok := object.Key("ToPhoneNumber") ok.String(*v.ToPhoneNumber) @@ -14571,6 +14578,17 @@ func awsRestjson1_serializeDocumentSigninDelegateGroupList(v []types.SigninDeleg return nil } +func awsRestjson1_serializeDocumentSipHeadersMap(v map[string]string, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + om.String(v[key]) + } + return nil +} + func awsRestjson1_serializeDocumentSipMediaApplicationEndpoint(v *types.SipMediaApplicationEndpoint, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/service/chime/types/types.go b/service/chime/types/types.go index aec82f7d916..f64432f1697 100644 --- a/service/chime/types/types.go +++ b/service/chime/types/types.go @@ -949,15 +949,18 @@ type OrderedPhoneNumber struct { } // Origination settings enable your SIP hosts to receive inbound calls using your -// Amazon Chime Voice Connector. +// Amazon Chime Voice Connector. The parameters listed below are not required, but +// you must use at least one. type Origination struct { // When origination settings are disabled, inbound calls are not enabled for your - // Amazon Chime Voice Connector. + // Amazon Chime Voice Connector. This parameter is not required, but you must + // specify this parameter or Routes. Disabled *bool // The call distribution properties defined for your SIP hosts. Valid range: - // Minimum value of 1. Maximum value of 20. + // Minimum value of 1. Maximum value of 20. This parameter is not required, but you + // must specify this parameter or Disabled. Routes []OriginationRoute noSmithyDocumentSerde @@ -965,7 +968,8 @@ type Origination struct { // Origination routes define call distribution properties for your SIP hosts to // receive inbound calls using your Amazon Chime Voice Connector. Limit: Ten -// origination routes for each Amazon Chime Voice Connector. +// origination routes for each Amazon Chime Voice Connector. The parameters listed +// below are not required, but you must use at least one. type OriginationRoute struct { // The FQDN or IP address to contact for origination traffic. 
@@ -1508,7 +1512,7 @@ type TerminationHealth struct { // EngineTranscribeSettings or EngineTranscribeMedicalSettings. type TranscriptionConfiguration struct { - // The transcription configuration settings passed to Amazon Transcribe. + // The transcription configuration settings passed to Amazon Transcribe Medical. EngineTranscribeMedicalSettings *EngineTranscribeMedicalSettings // The transcription configuration settings passed to Amazon Transcribe. diff --git a/service/cloudformation/api_op_DescribeStackResourceDrifts.go b/service/cloudformation/api_op_DescribeStackResourceDrifts.go index 5bc495bf553..843fbf7913e 100644 --- a/service/cloudformation/api_op_DescribeStackResourceDrifts.go +++ b/service/cloudformation/api_op_DescribeStackResourceDrifts.go @@ -16,7 +16,7 @@ import ( // the specified stack. This includes actual and expected configuration values for // resources where CloudFormation detects configuration drift. For a given stack, // there will be one StackResourceDrift for each stack resource that has been -// checked for drift. Resources that have not yet been checked for drift are not +// checked for drift. Resources that haven't yet been checked for drift are not // included. Resources that do not currently support drift detection are not // checked, and so not included. For a list of resources that support drift // detection, see Resources that Support Drift Detection diff --git a/service/cloudformation/api_op_DescribeStacks.go b/service/cloudformation/api_op_DescribeStacks.go index f37b0ea171b..9f950e2cf41 100644 --- a/service/cloudformation/api_op_DescribeStacks.go +++ b/service/cloudformation/api_op_DescribeStacks.go @@ -20,7 +20,7 @@ import ( // Returns the description for the specified stack; if no stack name was specified, // then it returns the description for all the stacks created. If the stack does -// not exist, an AmazonCloudFormationException is returned. +// not exist, an ValidationError is returned. func (c *Client) DescribeStacks(ctx context.Context, params *DescribeStacksInput, optFns ...func(*Options)) (*DescribeStacksOutput, error) { if params == nil { params = &DescribeStacksInput{} diff --git a/service/cloudformation/types/types.go b/service/cloudformation/types/types.go index 02dfe104920..ef8af7c96f3 100644 --- a/service/cloudformation/types/types.go +++ b/service/cloudformation/types/types.go @@ -10,7 +10,7 @@ import ( // Structure that contains the results of the account gate function which // CloudFormation invokes, if present, before proceeding with a stack set operation // in an account and Region. For each account and Region, CloudFormation lets you -// specify a Lamdba function that encapsulates any requirements that must be met +// specify a Lambda function that encapsulates any requirements that must be met // before CloudFormation can proceed with a stack set operation in that account and // Region. 
CloudFormation invokes the function each time a stack set operation is // requested for that account and Region; if the function returns FAILED, diff --git a/service/comprehend/deserializers.go b/service/comprehend/deserializers.go index 41ec239fb52..de44bed2459 100644 --- a/service/comprehend/deserializers.go +++ b/service/comprehend/deserializers.go @@ -8048,11 +8048,29 @@ func awsAwsjson11_deserializeDocumentAugmentedManifestsListItem(v **types.Augmen for key, value := range shape { switch key { + case "AnnotationDataS3Uri": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3Uri to be of type string, got %T instead", value) + } + sv.AnnotationDataS3Uri = ptr.String(jtv) + } + case "AttributeNames": if err := awsAwsjson11_deserializeDocumentAttributeNamesList(&sv.AttributeNames, value); err != nil { return err } + case "DocumentType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AugmentedManifestsDocumentTypeFormat to be of type string, got %T instead", value) + } + sv.DocumentType = types.AugmentedManifestsDocumentTypeFormat(jtv) + } + case "S3Uri": if value != nil { jtv, ok := value.(string) @@ -8062,6 +8080,15 @@ func awsAwsjson11_deserializeDocumentAugmentedManifestsListItem(v **types.Augmen sv.S3Uri = ptr.String(jtv) } + case "SourceDocumentsS3Uri": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3Uri to be of type string, got %T instead", value) + } + sv.SourceDocumentsS3Uri = ptr.String(jtv) + } + default: _, _ = key, value @@ -9578,6 +9605,60 @@ func awsAwsjson11_deserializeDocumentDocumentLabel(v **types.DocumentLabel, valu return nil } +func awsAwsjson11_deserializeDocumentDocumentReaderConfig(v **types.DocumentReaderConfig, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DocumentReaderConfig + if *v == nil { + sv = &types.DocumentReaderConfig{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "DocumentReadAction": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DocumentReadAction to be of type string, got %T instead", value) + } + sv.DocumentReadAction = types.DocumentReadAction(jtv) + } + + case "DocumentReadMode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DocumentReadMode to be of type string, got %T instead", value) + } + sv.DocumentReadMode = types.DocumentReadMode(jtv) + } + + case "FeatureTypes": + if err := awsAwsjson11_deserializeDocumentListOfDocumentReadFeatureTypes(&sv.FeatureTypes, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentDominantLanguage(v **types.DominantLanguage, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -11493,6 +11574,11 @@ func awsAwsjson11_deserializeDocumentInputDataConfig(v **types.InputDataConfig, for key, value := range shape { switch key { + case "DocumentReaderConfig": + if err := awsAwsjson11_deserializeDocumentDocumentReaderConfig(&sv.DocumentReaderConfig, value); err != nil { + return err + } + case "InputFormat": if value != nil { jtv, ok := value.(string) @@ -12208,6 +12294,42 @@ func 
awsAwsjson11_deserializeDocumentListOfDetectSyntaxResult(v *[]types.BatchDe return nil } +func awsAwsjson11_deserializeDocumentListOfDocumentReadFeatureTypes(v *[]types.DocumentReadFeatureTypes, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.DocumentReadFeatureTypes + if *v == nil { + cv = []types.DocumentReadFeatureTypes{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.DocumentReadFeatureTypes + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DocumentReadFeatureTypes to be of type string, got %T instead", value) + } + col = types.DocumentReadFeatureTypes(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsAwsjson11_deserializeDocumentListOfDominantLanguages(v *[]types.DominantLanguage, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/comprehend/serializers.go b/service/comprehend/serializers.go index 34c66c2aecd..480ad23fdbb 100644 --- a/service/comprehend/serializers.go +++ b/service/comprehend/serializers.go @@ -2896,6 +2896,11 @@ func awsAwsjson11_serializeDocumentAugmentedManifestsListItem(v *types.Augmented object := value.Object() defer object.Close() + if v.AnnotationDataS3Uri != nil { + ok := object.Key("AnnotationDataS3Uri") + ok.String(*v.AnnotationDataS3Uri) + } + if v.AttributeNames != nil { ok := object.Key("AttributeNames") if err := awsAwsjson11_serializeDocumentAttributeNamesList(v.AttributeNames, ok); err != nil { @@ -2903,11 +2908,21 @@ func awsAwsjson11_serializeDocumentAugmentedManifestsListItem(v *types.Augmented } } + if len(v.DocumentType) > 0 { + ok := object.Key("DocumentType") + ok.String(string(v.DocumentType)) + } + if v.S3Uri != nil { ok := object.Key("S3Uri") ok.String(*v.S3Uri) } + if v.SourceDocumentsS3Uri != nil { + ok := object.Key("SourceDocumentsS3Uri") + ok.String(*v.SourceDocumentsS3Uri) + } + return nil } @@ -3030,6 +3045,30 @@ func awsAwsjson11_serializeDocumentDocumentClassifierOutputDataConfig(v *types.D return nil } +func awsAwsjson11_serializeDocumentDocumentReaderConfig(v *types.DocumentReaderConfig, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.DocumentReadAction) > 0 { + ok := object.Key("DocumentReadAction") + ok.String(string(v.DocumentReadAction)) + } + + if len(v.DocumentReadMode) > 0 { + ok := object.Key("DocumentReadMode") + ok.String(string(v.DocumentReadMode)) + } + + if v.FeatureTypes != nil { + ok := object.Key("FeatureTypes") + if err := awsAwsjson11_serializeDocumentListOfDocumentReadFeatureTypes(v.FeatureTypes, ok); err != nil { + return err + } + } + + return nil +} + func awsAwsjson11_serializeDocumentDominantLanguageDetectionJobFilter(v *types.DominantLanguageDetectionJobFilter, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -3285,6 +3324,13 @@ func awsAwsjson11_serializeDocumentInputDataConfig(v *types.InputDataConfig, val object := value.Object() defer object.Close() + if v.DocumentReaderConfig != nil { + ok := object.Key("DocumentReaderConfig") + if err := awsAwsjson11_serializeDocumentDocumentReaderConfig(v.DocumentReaderConfig, ok); err != nil { + return err + } + } + if len(v.InputFormat) > 0 { ok := object.Key("InputFormat") ok.String(string(v.InputFormat)) @@ -3325,6 +3371,17 
@@ func awsAwsjson11_serializeDocumentKeyPhrasesDetectionJobFilter(v *types.KeyPhra return nil } +func awsAwsjson11_serializeDocumentListOfDocumentReadFeatureTypes(v []types.DocumentReadFeatureTypes, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + func awsAwsjson11_serializeDocumentListOfPiiEntityTypes(v []types.PiiEntityType, value smithyjson.Value) error { array := value.Array() defer array.Close() diff --git a/service/comprehend/types/enums.go b/service/comprehend/types/enums.go index 9db20ed466a..ce85d4678d2 100644 --- a/service/comprehend/types/enums.go +++ b/service/comprehend/types/enums.go @@ -2,6 +2,25 @@ package types +type AugmentedManifestsDocumentTypeFormat string + +// Enum values for AugmentedManifestsDocumentTypeFormat +const ( + AugmentedManifestsDocumentTypeFormatPlainTextDocument AugmentedManifestsDocumentTypeFormat = "PLAIN_TEXT_DOCUMENT" + AugmentedManifestsDocumentTypeFormatSemiStructuredDocument AugmentedManifestsDocumentTypeFormat = "SEMI_STRUCTURED_DOCUMENT" +) + +// Values returns all known values for AugmentedManifestsDocumentTypeFormat. Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (AugmentedManifestsDocumentTypeFormat) Values() []AugmentedManifestsDocumentTypeFormat { + return []AugmentedManifestsDocumentTypeFormat{ + "PLAIN_TEXT_DOCUMENT", + "SEMI_STRUCTURED_DOCUMENT", + } +} + type DocumentClassifierDataFormat string // Enum values for DocumentClassifierDataFormat @@ -38,6 +57,60 @@ func (DocumentClassifierMode) Values() []DocumentClassifierMode { } } +type DocumentReadAction string + +// Enum values for DocumentReadAction +const ( + DocumentReadActionTextractDetectDocumentText DocumentReadAction = "TEXTRACT_DETECT_DOCUMENT_TEXT" + DocumentReadActionTextractAnalyzeDocument DocumentReadAction = "TEXTRACT_ANALYZE_DOCUMENT" +) + +// Values returns all known values for DocumentReadAction. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (DocumentReadAction) Values() []DocumentReadAction { + return []DocumentReadAction{ + "TEXTRACT_DETECT_DOCUMENT_TEXT", + "TEXTRACT_ANALYZE_DOCUMENT", + } +} + +type DocumentReadFeatureTypes string + +// Enum values for DocumentReadFeatureTypes +const ( + DocumentReadFeatureTypesTables DocumentReadFeatureTypes = "TABLES" + DocumentReadFeatureTypesForms DocumentReadFeatureTypes = "FORMS" +) + +// Values returns all known values for DocumentReadFeatureTypes. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (DocumentReadFeatureTypes) Values() []DocumentReadFeatureTypes { + return []DocumentReadFeatureTypes{ + "TABLES", + "FORMS", + } +} + +type DocumentReadMode string + +// Enum values for DocumentReadMode +const ( + DocumentReadModeServiceDefault DocumentReadMode = "SERVICE_DEFAULT" + DocumentReadModeForceDocumentReadAction DocumentReadMode = "FORCE_DOCUMENT_READ_ACTION" +) + +// Values returns all known values for DocumentReadMode. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (DocumentReadMode) Values() []DocumentReadMode { + return []DocumentReadMode{ + "SERVICE_DEFAULT", + "FORCE_DOCUMENT_READ_ACTION", + } +} + type EndpointStatus string // Enum values for EndpointStatus diff --git a/service/comprehend/types/types.go b/service/comprehend/types/types.go index 1e46494b538..63c4192496f 100644 --- a/service/comprehend/types/types.go +++ b/service/comprehend/types/types.go @@ -29,6 +29,26 @@ type AugmentedManifestsListItem struct { // This member is required. S3Uri *string + // The S3 prefix to the annotation files that are referred in the augmented + // manifest file. + AnnotationDataS3Uri *string + + // The type of augmented manifest. PlainTextDocument or SemiStructuredDocument. If + // you don't specify, the default is PlainTextDocument. + // + // * PLAIN_TEXT_DOCUMENT A + // document type that represents any unicode text that is encoded in UTF-8. + // + // * + // SEMI_STRUCTURED_DOCUMENT A document type with positional and structural context, + // like a PDF. For training with Amazon Comprehend, only PDFs are supported. For + // inference, Amazon Comprehend support PDFs, DOCX and TXT. + DocumentType AugmentedManifestsDocumentTypeFormat + + // The S3 prefix to the source files (PDFs) that are referred to in the augmented + // manifest file. + SourceDocumentsS3Uri *string + noSmithyDocumentSerde } @@ -499,6 +519,37 @@ type DocumentLabel struct { noSmithyDocumentSerde } +// The input properties for a topic detection job. +type DocumentReaderConfig struct { + + // This enum field will start with two values which will apply to PDFs: + // + // * + // TEXTRACT_DETECT_DOCUMENT_TEXT - The service calls DetectDocumentText for PDF + // documents per page. + // + // * TEXTRACT_ANALYZE_DOCUMENT - The service calls + // AnalyzeDocument for PDF documents per page. + // + // This member is required. + DocumentReadAction DocumentReadAction + + // This enum field provides two values: + // + // * SERVICE_DEFAULT - use service defaults + // for Document reading. For Digital PDF it would mean using an internal parser + // instead of Textract APIs + // + // * FORCE_DOCUMENT_READ_ACTION - Always use specified + // action for DocumentReadAction, including Digital PDF. + DocumentReadMode DocumentReadMode + + // Specifies how the text in an input file should be processed: + FeatureTypes []DocumentReadFeatureTypes + + noSmithyDocumentSerde +} + // Returns the code for the dominant language in the input text and the level of // confidence that Amazon Comprehend has in the accuracy of the detection. type DominantLanguage struct { @@ -1150,7 +1201,7 @@ type EventsDetectionJobProperties struct { noSmithyDocumentSerde } -// The input properties for a topic detection job. +// The input properties for an inference job. type InputDataConfig struct { // The Amazon S3 URI for the input data. The URI must be in same region as the API @@ -1163,6 +1214,12 @@ type InputDataConfig struct { // This member is required. S3Uri *string + // The document reader config field applies only for InputDataConfig of + // StartEntitiesDetectionJob. Use DocumentReaderConfig to provide specifications + // about how you want your inference documents read. Currently it applies for PDF + // documents in StartEntitiesDetectionJob custom inference. 
+ DocumentReaderConfig *DocumentReaderConfig + // Specifies how the text in an input file should be processed: // // * ONE_DOC_PER_FILE diff --git a/service/comprehend/validators.go b/service/comprehend/validators.go index cf0d8113eb2..a21a3d16ca9 100644 --- a/service/comprehend/validators.go +++ b/service/comprehend/validators.go @@ -1262,6 +1262,21 @@ func validateDocumentClassifierInputDataConfig(v *types.DocumentClassifierInputD } } +func validateDocumentReaderConfig(v *types.DocumentReaderConfig) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DocumentReaderConfig"} + if len(v.DocumentReadAction) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("DocumentReadAction")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateEntityRecognizerAnnotations(v *types.EntityRecognizerAnnotations) error { if v == nil { return nil @@ -1403,6 +1418,11 @@ func validateInputDataConfig(v *types.InputDataConfig) error { if v.S3Uri == nil { invalidParams.Add(smithy.NewErrParamRequired("S3Uri")) } + if v.DocumentReaderConfig != nil { + if err := validateDocumentReaderConfig(v.DocumentReaderConfig); err != nil { + invalidParams.AddNested("DocumentReaderConfig", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { diff --git a/service/ec2/api_op_CopyImage.go b/service/ec2/api_op_CopyImage.go index 067a18e2eb1..55957557027 100644 --- a/service/ec2/api_op_CopyImage.go +++ b/service/ec2/api_op_CopyImage.go @@ -11,9 +11,9 @@ import ( ) // Initiates the copy of an AMI. You can copy an AMI from one Region to another, or -// from a Region to an AWS Outpost. You can't copy an AMI from an Outpost to a -// Region, from one Outpost to another, or within the same Outpost. To copy an AMI -// to another partition, see CreateStoreImageTask +// from a Region to an Outpost. You can't copy an AMI from an Outpost to a Region, +// from one Outpost to another, or within the same Outpost. To copy an AMI to +// another partition, see CreateStoreImageTask // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateStoreImageTask.html). // To copy an AMI from one Region to another, specify the source Region using // the @@ -79,11 +79,11 @@ type CopyImageInput struct { Description *string // The Amazon Resource Name (ARN) of the Outpost to which to copy the AMI. Only - // specify this parameter when copying an AMI from an AWS Region to an Outpost. The - // AMI must be in the Region of the destination Outpost. You cannot copy an AMI - // from an Outpost to a Region, from one Outpost to another, or within the same - // Outpost. For more information, see Copying AMIs from an AWS Region to an - // Outpost + // specify this parameter when copying an AMI from an Amazon Web Services Region to + // an Outpost. The AMI must be in the Region of the destination Outpost. You cannot + // copy an AMI from an Outpost to a Region, from one Outpost to another, or within + // the same Outpost. For more information, see Copying AMIs from an Amazon Web + // Services Region to an Outpost // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshots-outposts.html#copy-amis) // in the Amazon Elastic Compute Cloud User Guide. DestinationOutpostArn *string @@ -96,23 +96,24 @@ type CopyImageInput struct { // Specifies whether the destination snapshots of the copied image should be // encrypted. 
You can encrypt a copy of an unencrypted snapshot, but you cannot - // create an unencrypted copy of an encrypted snapshot. The default CMK for EBS is - // used unless you specify a non-default AWS Key Management Service (AWS KMS) CMK - // using KmsKeyId. For more information, see Amazon EBS Encryption + // create an unencrypted copy of an encrypted snapshot. The default KMS key for + // Amazon EBS is used unless you specify a non-default Key Management Service (KMS) + // KMS key using KmsKeyId. For more information, see Amazon EBS Encryption // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) in the // Amazon Elastic Compute Cloud User Guide. Encrypted *bool - // The identifier of the symmetric AWS Key Management Service (AWS KMS) customer - // master key (CMK) to use when creating encrypted volumes. If this parameter is - // not specified, your AWS managed CMK for EBS is used. If you specify a CMK, you - // must also set the encrypted state to true. You can specify a CMK using any of - // the following: + // The identifier of the symmetric Key Management Service (KMS) KMS key to use when + // creating encrypted volumes. If this parameter is not specified, your Amazon Web + // Services managed KMS key for Amazon EBS is used. If you specify a KMS key, you + // must also set the encrypted state to true. You can specify a KMS key using any + // of the following: // - // * Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. + // * Key ID. For example, + // 1234abcd-12ab-34cd-56ef-1234567890ab. // - // * - // Key alias. For example, alias/ExampleAlias. + // * Key alias. For example, + // alias/ExampleAlias. // // * Key ARN. For example, // arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. @@ -121,10 +122,11 @@ type CopyImageInput struct { // Alias ARN. For example, // arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // - // AWS authenticates the - // CMK asynchronously. Therefore, if you specify an identifier that is not valid, - // the action can appear to complete, but eventually fails. The specified CMK must - // exist in the destination Region. Amazon EBS does not support asymmetric CMKs. + // Amazon Web Services + // authenticates the KMS key asynchronously. Therefore, if you specify an + // identifier that is not valid, the action can appear to complete, but eventually + // fails. The specified KMS key must exist in the destination Region. Amazon EBS + // does not support asymmetric KMS keys. KmsKeyId *string noSmithyDocumentSerde diff --git a/service/ec2/api_op_CreateImage.go b/service/ec2/api_op_CreateImage.go index 22026a552e8..88983acdcd7 100644 --- a/service/ec2/api_op_CreateImage.go +++ b/service/ec2/api_op_CreateImage.go @@ -13,7 +13,7 @@ import ( // Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance that is // either running or stopped. If you customized your instance with instance store -// volumes or EBS volumes in addition to the root device volume, the new AMI +// volumes or Amazon EBS volumes in addition to the root device volume, the new AMI // contains block device mapping information for those volumes. When you launch an // instance from this new AMI, the instance automatically launches with those // additional volumes. For more information, see Creating Amazon EBS-Backed Linux @@ -65,8 +65,10 @@ type CreateImageInput struct { // By default, Amazon EC2 attempts to shut down and reboot the instance before // creating the image. 
If the No Reboot option is set, Amazon EC2 doesn't shut down - // the instance before creating the image. When this option is used, file system - // integrity on the created image can't be guaranteed. + // the instance before creating the image. Without a reboot, the AMI will be crash + // consistent (all the volumes are snapshotted at the same time), but not + // application consistent (all the operating system buffers are not flushed to disk + // before the snapshots are created). NoReboot *bool // The tags to apply to the AMI and snapshots on creation. You can tag the AMI, the @@ -76,8 +78,8 @@ type CreateImageInput struct { // image. // // * To tag the snapshots that are created of the root volume and of other - // EBS volumes that are attached to the instance, the value for ResourceType must - // be snapshot. The same tag is applied to all of the snapshots that are + // Amazon EBS volumes that are attached to the instance, the value for ResourceType + // must be snapshot. The same tag is applied to all of the snapshots that are // created. // // If you specify other values for ResourceType, the request fails. To diff --git a/service/ec2/api_op_CreateManagedPrefixList.go b/service/ec2/api_op_CreateManagedPrefixList.go index 0c6932ce057..30146305347 100644 --- a/service/ec2/api_op_CreateManagedPrefixList.go +++ b/service/ec2/api_op_CreateManagedPrefixList.go @@ -14,8 +14,6 @@ import ( // Creates a managed prefix list. You can specify one or more entries for the // prefix list. Each entry consists of a CIDR block and an optional description. -// You must specify the maximum number of entries for the prefix list. The maximum -// number of entries cannot be changed later. func (c *Client) CreateManagedPrefixList(ctx context.Context, params *CreateManagedPrefixListInput, optFns ...func(*Options)) (*CreateManagedPrefixListOutput, error) { if params == nil { params = &CreateManagedPrefixListInput{} diff --git a/service/ec2/api_op_CreateRestoreImageTask.go b/service/ec2/api_op_CreateRestoreImageTask.go index 2078bd0f8dd..711f9ca1670 100644 --- a/service/ec2/api_op_CreateRestoreImageTask.go +++ b/service/ec2/api_op_CreateRestoreImageTask.go @@ -11,14 +11,14 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Starts a task that restores an AMI from an S3 object that was previously created -// by using CreateStoreImageTask +// Starts a task that restores an AMI from an Amazon S3 object that was previously +// created by using CreateStoreImageTask // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateStoreImageTask.html). // To use this API, you must have the required permissions. For more information, -// see Permissions for storing and restoring AMIs using S3 +// see Permissions for storing and restoring AMIs using Amazon S3 // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html#ami-s3-permissions) // in the Amazon Elastic Compute Cloud User Guide. For more information, see Store -// and restore an AMI using S3 +// and restore an AMI using Amazon S3 // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html) in // the Amazon Elastic Compute Cloud User Guide. 
func (c *Client) CreateRestoreImageTask(ctx context.Context, params *CreateRestoreImageTaskInput, optFns ...func(*Options)) (*CreateRestoreImageTaskOutput, error) { @@ -38,7 +38,7 @@ func (c *Client) CreateRestoreImageTask(ctx context.Context, params *CreateResto type CreateRestoreImageTaskInput struct { - // The name of the S3 bucket that contains the stored AMI object. + // The name of the Amazon S3 bucket that contains the stored AMI object. // // This member is required. Bucket *string diff --git a/service/ec2/api_op_CreateStoreImageTask.go b/service/ec2/api_op_CreateStoreImageTask.go index 10f2941cb3e..3b1b1e17949 100644 --- a/service/ec2/api_op_CreateStoreImageTask.go +++ b/service/ec2/api_op_CreateStoreImageTask.go @@ -11,12 +11,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Stores an AMI as a single object in an S3 bucket. To use this API, you must have -// the required permissions. For more information, see Permissions for storing and -// restoring AMIs using S3 +// Stores an AMI as a single object in an Amazon S3 bucket. To use this API, you +// must have the required permissions. For more information, see Permissions for +// storing and restoring AMIs using Amazon S3 // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html#ami-s3-permissions) // in the Amazon Elastic Compute Cloud User Guide. For more information, see Store -// and restore an AMI using S3 +// and restore an AMI using Amazon S3 // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html) in // the Amazon Elastic Compute Cloud User Guide. func (c *Client) CreateStoreImageTask(ctx context.Context, params *CreateStoreImageTaskInput, optFns ...func(*Options)) (*CreateStoreImageTaskOutput, error) { @@ -36,9 +36,9 @@ func (c *Client) CreateStoreImageTask(ctx context.Context, params *CreateStoreIm type CreateStoreImageTaskInput struct { - // The name of the S3 bucket in which the AMI object will be stored. The bucket - // must be in the Region in which the request is being made. The AMI object appears - // in the bucket only after the upload task has completed. + // The name of the Amazon S3 bucket in which the AMI object will be stored. The + // bucket must be in the Region in which the request is being made. The AMI object + // appears in the bucket only after the upload task has completed. // // This member is required. Bucket *string @@ -54,7 +54,7 @@ type CreateStoreImageTaskInput struct { // UnauthorizedOperation. DryRun *bool - // The tags to apply to the AMI object that will be stored in the S3 bucket. + // The tags to apply to the AMI object that will be stored in the Amazon S3 bucket. S3ObjectTags []types.S3ObjectTag noSmithyDocumentSerde diff --git a/service/ec2/api_op_DescribeImages.go b/service/ec2/api_op_DescribeImages.go index a988b850589..b85c727c43d 100644 --- a/service/ec2/api_op_DescribeImages.go +++ b/service/ec2/api_op_DescribeImages.go @@ -21,12 +21,12 @@ import ( // Describes the specified images (AMIs, AKIs, and ARIs) available to you or all of // the images available to you. The images available to you include public images, -// private images that you own, and private images owned by other AWS accounts for -// which you have explicit launch permissions. Recently deregistered images appear -// in the returned results for a short interval and then return empty results. 
-// After all instances that reference a deregistered AMI are terminated, specifying -// the ID of the image will eventually return an error indicating that the AMI ID -// cannot be found. +// private images that you own, and private images owned by other Amazon Web +// Services accounts for which you have explicit launch permissions. Recently +// deregistered images appear in the returned results for a short interval and then +// return empty results. After all instances that reference a deregistered AMI are +// terminated, specifying the ID of the image will eventually return an error +// indicating that the AMI ID cannot be found. func (c *Client) DescribeImages(ctx context.Context, params *DescribeImagesInput, optFns ...func(*Options)) (*DescribeImagesOutput, error) { if params == nil { params = &DescribeImagesInput{} @@ -50,8 +50,8 @@ type DescribeImagesInput struct { // UnauthorizedOperation. DryRun *bool - // Scopes the images by users with explicit launch permissions. Specify an AWS - // account ID, self (the sender of the request), or all (public AMIs). + // Scopes the images by users with explicit launch permissions. Specify an Amazon + // Web Services account ID, self (the sender of the request), or all (public AMIs). ExecutableUsers []string // The filters. @@ -67,50 +67,51 @@ type DescribeImagesInput struct { // mapping (for example, /dev/sdh or xvdh). // // * block-device-mapping.snapshot-id - - // The ID of the snapshot used for the EBS volume. + // The ID of the snapshot used for the Amazon EBS volume. // // * - // block-device-mapping.volume-size - The volume size of the EBS volume, in GiB. + // block-device-mapping.volume-size - The volume size of the Amazon EBS volume, in + // GiB. // - // * - // block-device-mapping.volume-type - The volume type of the EBS volume (gp2 | io1 - // | io2 | st1 | sc1 | standard). + // * block-device-mapping.volume-type - The volume type of the Amazon EBS + // volume (io1 | io2 | gp2 | gp3 | sc1 | st1 | standard). // - // * block-device-mapping.encrypted - A Boolean - // that indicates whether the EBS volume is encrypted. + // * + // block-device-mapping.encrypted - A Boolean that indicates whether the Amazon EBS + // volume is encrypted. // - // * description - The - // description of the image (provided during image creation). + // * description - The description of the image (provided + // during image creation). // - // * ena-support - A - // Boolean that indicates whether enhanced networking with ENA is enabled. + // * ena-support - A Boolean that indicates whether + // enhanced networking with ENA is enabled. // - // * - // hypervisor - The hypervisor type (ovm | xen). + // * hypervisor - The hypervisor type + // (ovm | xen). // - // * image-id - The ID of the - // image. + // * image-id - The ID of the image. // - // * image-type - The image type (machine | kernel | ramdisk). + // * image-type - The image type + // (machine | kernel | ramdisk). // - // * is-public - // - A Boolean that indicates whether the image is public. + // * is-public - A Boolean that indicates whether + // the image is public. // - // * kernel-id - The - // kernel ID. + // * kernel-id - The kernel ID. // - // * manifest-location - The location of the image manifest. + // * manifest-location - The + // location of the image manifest. // - // * name - - // The name of the AMI (provided during image creation). + // * name - The name of the AMI (provided during + // image creation). // - // * owner-alias - The owner - // alias (amazon | aws-marketplace). 
The valid aliases are defined in an - // Amazon-maintained list. This is not the AWS account alias that can be set using - // the IAM console. We recommend that you use the Owner request parameter instead - // of this filter. + // * owner-alias - The owner alias (amazon | aws-marketplace). + // The valid aliases are defined in an Amazon-maintained list. This is not the + // Amazon Web Services account alias that can be set using the IAM console. We + // recommend that you use the Owner request parameter instead of this filter. // - // * owner-id - The AWS account ID of the owner. We recommend that + // * + // owner-id - The Amazon Web Services account ID of the owner. We recommend that // you use the Owner request parameter instead of this filter. // // * platform - The @@ -119,8 +120,8 @@ type DescribeImagesInput struct { // * product-code - The // product code. // - // * product-code.type - The type of the product code (devpay | - // marketplace). + // * product-code.type - The type of the product code + // (marketplace). // // * ramdisk-id - The RAM disk ID. // @@ -167,9 +168,9 @@ type DescribeImagesInput struct { IncludeDeprecated *bool // Scopes the results to images with the specified owners. You can specify a - // combination of AWS account IDs, self, amazon, and aws-marketplace. If you omit - // this parameter, the results include all images for which you have launch - // permissions, regardless of ownership. + // combination of Amazon Web Services account IDs, self, amazon, and + // aws-marketplace. If you omit this parameter, the results include all images for + // which you have launch permissions, regardless of ownership. Owners []string noSmithyDocumentSerde diff --git a/service/ec2/api_op_DescribeInstanceTypes.go b/service/ec2/api_op_DescribeInstanceTypes.go index 820fa0efd2e..5844d377775 100644 --- a/service/ec2/api_op_DescribeInstanceTypes.go +++ b/service/ec2/api_op_DescribeInstanceTypes.go @@ -138,24 +138,25 @@ type DescribeInstanceTypesInput struct { // // * // network-info.encryption-in-transit-supported - Indicates whether the instance - // type automatically encrypts in-transit traffic between instances. + // type automatically encrypts in-transit traffic between instances (true | + // false). + // + // * network-info.ipv4-addresses-per-interface - The maximum number of + // private IPv4 addresses per network interface. // // * - // network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 + // network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 // addresses per network interface. // - // * network-info.ipv6-addresses-per-interface - - // The maximum number of private IPv6 addresses per network interface. + // * network-info.ipv6-supported - Indicates + // whether the instance type supports IPv6 (true | false). // // * - // network-info.ipv6-supported - Indicates whether the instance type supports IPv6 - // (true | false). - // - // * network-info.maximum-network-interfaces - The maximum number - // of network interfaces per instance. + // network-info.maximum-network-interfaces - The maximum number of network + // interfaces per instance. // - // * network-info.network-performance - The - // network performance (for example, "25 Gigabit"). + // * network-info.network-performance - The network + // performance (for example, "25 Gigabit"). 
// // * // processor-info.supported-architecture - The CPU architecture (arm64 | i386 | diff --git a/service/ec2/api_op_DescribeStoreImageTasks.go b/service/ec2/api_op_DescribeStoreImageTasks.go index 0ed16813165..650c30041fb 100644 --- a/service/ec2/api_op_DescribeStoreImageTasks.go +++ b/service/ec2/api_op_DescribeStoreImageTasks.go @@ -19,10 +19,10 @@ import ( // shows the estimated progress as a percentage. Tasks are listed in reverse // chronological order. Currently, only tasks from the past 31 days can be viewed. // To use this API, you must have the required permissions. For more information, -// see Permissions for storing and restoring AMIs using S3 +// see Permissions for storing and restoring AMIs using Amazon S3 // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html#ami-s3-permissions) // in the Amazon Elastic Compute Cloud User Guide. For more information, see Store -// and restore an AMI using S3 +// and restore an AMI using Amazon S3 // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-store-restore.html) in // the Amazon Elastic Compute Cloud User Guide. func (c *Client) DescribeStoreImageTasks(ctx context.Context, params *DescribeStoreImageTasksInput, optFns ...func(*Options)) (*DescribeStoreImageTasksOutput, error) { diff --git a/service/ec2/api_op_ModifyCapacityReservation.go b/service/ec2/api_op_ModifyCapacityReservation.go index a84365bb42e..5be4f26b757 100644 --- a/service/ec2/api_op_ModifyCapacityReservation.go +++ b/service/ec2/api_op_ModifyCapacityReservation.go @@ -71,7 +71,8 @@ type ModifyCapacityReservationInput struct { // EndDateType is limited. EndDateType types.EndDateType - // The number of instances for which to reserve capacity. Valid range: 1 - 1000 + // The number of instances for which to reserve capacity. The number of instances + // can't be increased or decreased by more than 1000 in a single request. InstanceCount *int32 noSmithyDocumentSerde diff --git a/service/ec2/api_op_ModifyImageAttribute.go b/service/ec2/api_op_ModifyImageAttribute.go index 2299ba224c5..c0c84d793b7 100644 --- a/service/ec2/api_op_ModifyImageAttribute.go +++ b/service/ec2/api_op_ModifyImageAttribute.go @@ -13,11 +13,10 @@ import ( // Modifies the specified attribute of the specified AMI. You can specify only one // attribute at a time. You can use the Attribute parameter to specify the -// attribute or one of the following parameters: Description, LaunchPermission, or -// ProductCode. AWS Marketplace product codes cannot be modified. Images with an -// AWS Marketplace product code cannot be made public. To enable the -// SriovNetSupport enhanced networking attribute of an image, enable -// SriovNetSupport on an instance and create an AMI from the instance. +// attribute or one of the following parameters: Description or LaunchPermission. +// Images with an Amazon Web Services Marketplace product code cannot be made +// public. To enable the SriovNetSupport enhanced networking attribute of an image, +// enable SriovNetSupport on an instance and create an AMI from the instance. func (c *Client) ModifyImageAttribute(ctx context.Context, params *ModifyImageAttributeInput, optFns ...func(*Options)) (*ModifyImageAttributeOutput, error) { if params == nil { params = &ModifyImageAttributeInput{} @@ -41,8 +40,8 @@ type ModifyImageAttributeInput struct { // This member is required. ImageId *string - // The name of the attribute to modify. The valid values are description, - // launchPermission, and productCodes. + // The name of the attribute to modify. 
The valid values are description and + // launchPermission. Attribute *string // A new description for the AMI. @@ -61,20 +60,19 @@ type ModifyImageAttributeInput struct { // is launchPermission. OperationType types.OperationType - // The DevPay product codes. After you add a product code to an AMI, it can't be - // removed. + // Not supported. ProductCodes []string // The user groups. This parameter can be used only when the Attribute parameter is // launchPermission. UserGroups []string - // The AWS account IDs. This parameter can be used only when the Attribute - // parameter is launchPermission. + // The Amazon Web Services account IDs. This parameter can be used only when the + // Attribute parameter is launchPermission. UserIds []string // The value of the attribute being modified. This parameter can be used only when - // the Attribute parameter is description or productCodes. + // the Attribute parameter is description. Value *string noSmithyDocumentSerde diff --git a/service/ec2/api_op_ModifyInstancePlacement.go b/service/ec2/api_op_ModifyInstancePlacement.go index a3cd5643217..b6d96cdc7c1 100644 --- a/service/ec2/api_op_ModifyInstancePlacement.go +++ b/service/ec2/api_op_ModifyInstancePlacement.go @@ -77,7 +77,9 @@ type ModifyInstancePlacementInput struct { // Reserved for future use. PartitionNumber *int32 - // The tenancy for the instance. + // The tenancy for the instance. For T3 instances, you can't change the tenancy + // from dedicated to host, or from host to dedicated. Attempting to make one of + // these unsupported tenancy changes results in the InvalidTenancy error code. Tenancy types.HostTenancy noSmithyDocumentSerde diff --git a/service/ec2/api_op_ModifyManagedPrefixList.go b/service/ec2/api_op_ModifyManagedPrefixList.go index 806f9b4b5ad..bc0e73113ab 100644 --- a/service/ec2/api_op_ModifyManagedPrefixList.go +++ b/service/ec2/api_op_ModifyManagedPrefixList.go @@ -50,7 +50,10 @@ type ModifyManagedPrefixListInput struct { DryRun *bool // The maximum number of entries for the prefix list. You cannot modify the entries - // of a prefix list and modify the size of a prefix list at the same time. + // of a prefix list and modify the size of a prefix list at the same time. If any + // of the resources that reference the prefix list cannot support the new maximum + // size, the modify operation fails. Check the state message for the IDs of the + // first ten resources that do not support the new maximum size. MaxEntries *int32 // A name for the prefix list. diff --git a/service/ec2/api_op_RegisterImage.go b/service/ec2/api_op_RegisterImage.go index c6c5b476dfc..93fdd55beeb 100644 --- a/service/ec2/api_op_RegisterImage.go +++ b/service/ec2/api_op_RegisterImage.go @@ -28,32 +28,32 @@ import ( // root volume of an instance launched from the AMI is encrypted. For more // information, see Create a Linux AMI from a snapshot // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html#creating-launching-ami-from-snapshot) -// and Use encryption with EBS-backed AMIs +// and Use encryption with Amazon EBS-backed AMIs // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIEncryption.html) in the -// Amazon Elastic Compute Cloud User Guide. AWS Marketplace product codes If any -// snapshots have AWS Marketplace product codes, they are copied to the new AMI. 
-// Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) -// and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code -// associated with an AMI to verify the subscription status for package updates. To -// create a new AMI for operating systems that require a billing product code, -// instead of registering the AMI, do the following to preserve the billing product -// code association: +// Amazon Elastic Compute Cloud User Guide. Amazon Web Services Marketplace product +// codes If any snapshots have Amazon Web Services Marketplace product codes, they +// are copied to the new AMI. Windows and some Linux distributions, such as Red Hat +// Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the Amazon +// EC2 billing product code associated with an AMI to verify the subscription +// status for package updates. To create a new AMI for operating systems that +// require a billing product code, instead of registering the AMI, do the following +// to preserve the billing product code association: // -// * Launch an instance from an existing AMI with that billing -// product code. +// * Launch an instance from an +// existing AMI with that billing product code. // // * Customize the instance. // -// * Create an AMI from the instance -// using CreateImage. +// * +// Create an AMI from the instance using CreateImage. // -// If you purchase a Reserved Instance to apply to an On-Demand -// Instance that was launched from an AMI with a billing product code, make sure -// that the Reserved Instance has the matching billing product code. If you -// purchase a Reserved Instance without the matching billing product code, the -// Reserved Instance will not be applied to the On-Demand Instance. For information -// about how to obtain the platform details and billing information of an AMI, see -// Obtaining billing information +// If you purchase a Reserved +// Instance to apply to an On-Demand Instance that was launched from an AMI with a +// billing product code, make sure that the Reserved Instance has the matching +// billing product code. If you purchase a Reserved Instance without the matching +// billing product code, the Reserved Instance will not be applied to the On-Demand +// Instance. For information about how to obtain the platform details and billing +// information of an AMI, see Understanding AMI billing // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html) in // the Amazon Elastic Compute Cloud User Guide. func (c *Client) RegisterImage(ctx context.Context, params *RegisterImageInput, optFns ...func(*Options)) (*RegisterImageOutput, error) { @@ -86,16 +86,16 @@ type RegisterImageInput struct { Architecture types.ArchitectureValues // The billing product codes. Your account must be authorized to specify billing - // product codes. Otherwise, you can use the AWS Marketplace to bill for the use of - // an AMI. + // product codes. Otherwise, you can use the Amazon Web Services Marketplace to + // bill for the use of an AMI. BillingProducts []string - // The block device mapping entries. If you specify an EBS volume using the ID of - // an EBS snapshot, you can't specify the encryption state of the volume. If you - // create an AMI on an Outpost, then all backing snapshots must be on the same - // Outpost or in the Region of that Outpost. AMIs on an Outpost that include local - // snapshots can be used to launch instances on the same Outpost only. 
For more - // information, Amazon EBS local snapshots on Outposts + // The block device mapping entries. If you specify an Amazon EBS volume using the + // ID of an Amazon EBS snapshot, you can't specify the encryption state of the + // volume. If you create an AMI on an Outpost, then all backing snapshots must be + // on the same Outpost or in the Region of that Outpost. AMIs on an Outpost that + // include local snapshots can be used to launch instances on the same Outpost + // only. For more information, Amazon EBS local snapshots on Outposts // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snapshots-outposts.html#ami) // in the Amazon Elastic Compute Cloud User Guide. BlockDeviceMappings []types.BlockDeviceMapping diff --git a/service/ec2/api_op_ResetImageAttribute.go b/service/ec2/api_op_ResetImageAttribute.go index d9f7f053a1a..53f621d68c6 100644 --- a/service/ec2/api_op_ResetImageAttribute.go +++ b/service/ec2/api_op_ResetImageAttribute.go @@ -11,8 +11,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Resets an attribute of an AMI to its default value. The productCodes attribute -// can't be reset. +// Resets an attribute of an AMI to its default value. func (c *Client) ResetImageAttribute(ctx context.Context, params *ResetImageAttributeInput, optFns ...func(*Options)) (*ResetImageAttributeOutput, error) { if params == nil { params = &ResetImageAttributeInput{} diff --git a/service/ec2/api_op_RunInstances.go b/service/ec2/api_op_RunInstances.go index f802b311de5..a1d94cefeec 100644 --- a/service/ec2/api_op_RunInstances.go +++ b/service/ec2/api_op_RunInstances.go @@ -146,7 +146,8 @@ type RunInstancesInput struct { // For more information, see Burstable performance instances // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon EC2 User Guide. Default: standard (T2 instances) or unlimited - // (T3/T3a instances) + // (T3/T3a instances) For T3 instances with host tenancy, only standard is + // supported. CreditSpecification *types.CreditSpecificationRequest // If you set this parameter to true, you can't terminate the instance using the diff --git a/service/ec2/api_op_StartInstances.go b/service/ec2/api_op_StartInstances.go index 964fc330f7f..926563e68e5 100644 --- a/service/ec2/api_op_StartInstances.go +++ b/service/ec2/api_op_StartInstances.go @@ -21,8 +21,12 @@ import ( // usage, and thereafter charges per second for instance usage. Before stopping an // instance, make sure it is in a state from which it can be restarted. Stopping an // instance does not preserve data stored in RAM. Performing this operation on an -// instance that uses an instance store as its root device returns an error. For -// more information, see Stopping instances +// instance that uses an instance store as its root device returns an error. If you +// attempt to start a T3 instance with host tenancy and the unlimted CPU credit +// option, the request fails. The unlimited CPU credit option is not supported on +// Dedicated Hosts. Before you start the instance, either change its CPU credit +// option to standard, or change its tenancy to default or dedicated. For more +// information, see Stopping instances // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html) in the // Amazon EC2 User Guide. 
func (c *Client) StartInstances(ctx context.Context, params *StartInstancesInput, optFns ...func(*Options)) (*StartInstancesOutput, error) { diff --git a/service/ec2/types/enums.go b/service/ec2/types/enums.go index ddfab039215..0cbed3c511f 100644 --- a/service/ec2/types/enums.go +++ b/service/ec2/types/enums.go @@ -204,9 +204,10 @@ type ArchitectureValues string // Enum values for ArchitectureValues const ( - ArchitectureValuesI386 ArchitectureValues = "i386" - ArchitectureValuesX8664 ArchitectureValues = "x86_64" - ArchitectureValuesArm64 ArchitectureValues = "arm64" + ArchitectureValuesI386 ArchitectureValues = "i386" + ArchitectureValuesX8664 ArchitectureValues = "x86_64" + ArchitectureValuesArm64 ArchitectureValues = "arm64" + ArchitectureValuesX8664Mac ArchitectureValues = "x86_64_mac" ) // Values returns all known values for ArchitectureValues. Note that this can be @@ -217,6 +218,7 @@ func (ArchitectureValues) Values() []ArchitectureValues { "i386", "x86_64", "arm64", + "x86_64_mac", } } @@ -2560,6 +2562,9 @@ const ( InstanceTypeX2gd12xlarge InstanceType = "x2gd.12xlarge" InstanceTypeX2gd16xlarge InstanceType = "x2gd.16xlarge" InstanceTypeX2gdMetal InstanceType = "x2gd.metal" + InstanceTypeVt13xlarge InstanceType = "vt1.3xlarge" + InstanceTypeVt16xlarge InstanceType = "vt1.6xlarge" + InstanceTypeVt124xlarge InstanceType = "vt1.24xlarge" ) // Values returns all known values for InstanceType. Note that this can be expanded @@ -2984,6 +2989,9 @@ func (InstanceType) Values() []InstanceType { "x2gd.12xlarge", "x2gd.16xlarge", "x2gd.metal", + "vt1.3xlarge", + "vt1.6xlarge", + "vt1.24xlarge", } } @@ -4073,55 +4081,68 @@ type ResourceType string // Enum values for ResourceType const ( - ResourceTypeClientVpnEndpoint ResourceType = "client-vpn-endpoint" - ResourceTypeCustomerGateway ResourceType = "customer-gateway" - ResourceTypeDedicatedHost ResourceType = "dedicated-host" - ResourceTypeDhcpOptions ResourceType = "dhcp-options" - ResourceTypeEgressOnlyInternetGateway ResourceType = "egress-only-internet-gateway" - ResourceTypeElasticIp ResourceType = "elastic-ip" - ResourceTypeElasticGpu ResourceType = "elastic-gpu" - ResourceTypeExportImageTask ResourceType = "export-image-task" - ResourceTypeExportInstanceTask ResourceType = "export-instance-task" - ResourceTypeFleet ResourceType = "fleet" - ResourceTypeFpgaImage ResourceType = "fpga-image" - ResourceTypeHostReservation ResourceType = "host-reservation" - ResourceTypeImage ResourceType = "image" - ResourceTypeImportImageTask ResourceType = "import-image-task" - ResourceTypeImportSnapshotTask ResourceType = "import-snapshot-task" - ResourceTypeInstance ResourceType = "instance" - ResourceTypeInstanceEventWindow ResourceType = "instance-event-window" - ResourceTypeInternetGateway ResourceType = "internet-gateway" - ResourceTypeKeyPair ResourceType = "key-pair" - ResourceTypeLaunchTemplate ResourceType = "launch-template" - ResourceTypeLocalGatewayRouteTableVpcAssociation ResourceType = "local-gateway-route-table-vpc-association" - ResourceTypeNatgateway ResourceType = "natgateway" - ResourceTypeNetworkAcl ResourceType = "network-acl" - ResourceTypeNetworkInterface ResourceType = "network-interface" - ResourceTypeNetworkInsightsAnalysis ResourceType = "network-insights-analysis" - ResourceTypeNetworkInsightsPath ResourceType = "network-insights-path" - ResourceTypePlacementGroup ResourceType = "placement-group" - ResourceTypeReservedInstances ResourceType = "reserved-instances" - ResourceTypeRouteTable ResourceType = "route-table" - 
ResourceTypeSecurityGroup ResourceType = "security-group" - ResourceTypeSecurityGroupRule ResourceType = "security-group-rule" - ResourceTypeSnapshot ResourceType = "snapshot" - ResourceTypeSpotFleetRequest ResourceType = "spot-fleet-request" - ResourceTypeSpotInstancesRequest ResourceType = "spot-instances-request" - ResourceTypeSubnet ResourceType = "subnet" - ResourceTypeTrafficMirrorFilter ResourceType = "traffic-mirror-filter" - ResourceTypeTrafficMirrorSession ResourceType = "traffic-mirror-session" - ResourceTypeTrafficMirrorTarget ResourceType = "traffic-mirror-target" - ResourceTypeTransitGateway ResourceType = "transit-gateway" - ResourceTypeTransitGatewayAttachment ResourceType = "transit-gateway-attachment" - ResourceTypeTransitGatewayConnectPeer ResourceType = "transit-gateway-connect-peer" - ResourceTypeTransitGatewayMulticastDomain ResourceType = "transit-gateway-multicast-domain" - ResourceTypeTransitGatewayRouteTable ResourceType = "transit-gateway-route-table" - ResourceTypeVolume ResourceType = "volume" - ResourceTypeVpc ResourceType = "vpc" - ResourceTypeVpcPeeringConnection ResourceType = "vpc-peering-connection" - ResourceTypeVpnConnection ResourceType = "vpn-connection" - ResourceTypeVpnGateway ResourceType = "vpn-gateway" - ResourceTypeVpcFlowLog ResourceType = "vpc-flow-log" + ResourceTypeCapacityReservation ResourceType = "capacity-reservation" + ResourceTypeClientVpnEndpoint ResourceType = "client-vpn-endpoint" + ResourceTypeCustomerGateway ResourceType = "customer-gateway" + ResourceTypeCarrierGateway ResourceType = "carrier-gateway" + ResourceTypeDedicatedHost ResourceType = "dedicated-host" + ResourceTypeDhcpOptions ResourceType = "dhcp-options" + ResourceTypeEgressOnlyInternetGateway ResourceType = "egress-only-internet-gateway" + ResourceTypeElasticIp ResourceType = "elastic-ip" + ResourceTypeElasticGpu ResourceType = "elastic-gpu" + ResourceTypeExportImageTask ResourceType = "export-image-task" + ResourceTypeExportInstanceTask ResourceType = "export-instance-task" + ResourceTypeFleet ResourceType = "fleet" + ResourceTypeFpgaImage ResourceType = "fpga-image" + ResourceTypeHostReservation ResourceType = "host-reservation" + ResourceTypeImage ResourceType = "image" + ResourceTypeImportImageTask ResourceType = "import-image-task" + ResourceTypeImportSnapshotTask ResourceType = "import-snapshot-task" + ResourceTypeInstance ResourceType = "instance" + ResourceTypeInstanceEventWindow ResourceType = "instance-event-window" + ResourceTypeInternetGateway ResourceType = "internet-gateway" + ResourceTypeIpv4poolEc2 ResourceType = "ipv4pool-ec2" + ResourceTypeIpv6poolEc2 ResourceType = "ipv6pool-ec2" + ResourceTypeKeyPair ResourceType = "key-pair" + ResourceTypeLaunchTemplate ResourceType = "launch-template" + ResourceTypeLocalGateway ResourceType = "local-gateway" + ResourceTypeLocalGatewayRouteTable ResourceType = "local-gateway-route-table" + ResourceTypeLocalGatewayVirtualInterface ResourceType = "local-gateway-virtual-interface" + ResourceTypeLocalGatewayVirtualInterfaceGroup ResourceType = "local-gateway-virtual-interface-group" + ResourceTypeLocalGatewayRouteTableVpcAssociation ResourceType = "local-gateway-route-table-vpc-association" + ResourceTypeLocalGatewayRouteTableVirtualInterfaceGroupAssociation ResourceType = "local-gateway-route-table-virtual-interface-group-association" + ResourceTypeNatgateway ResourceType = "natgateway" + ResourceTypeNetworkAcl ResourceType = "network-acl" + ResourceTypeNetworkInterface ResourceType = "network-interface" + 
ResourceTypeNetworkInsightsAnalysis ResourceType = "network-insights-analysis" + ResourceTypeNetworkInsightsPath ResourceType = "network-insights-path" + ResourceTypePlacementGroup ResourceType = "placement-group" + ResourceTypePrefixList ResourceType = "prefix-list" + ResourceTypeReplaceRootVolumeTask ResourceType = "replace-root-volume-task" + ResourceTypeReservedInstances ResourceType = "reserved-instances" + ResourceTypeRouteTable ResourceType = "route-table" + ResourceTypeSecurityGroup ResourceType = "security-group" + ResourceTypeSecurityGroupRule ResourceType = "security-group-rule" + ResourceTypeSnapshot ResourceType = "snapshot" + ResourceTypeSpotFleetRequest ResourceType = "spot-fleet-request" + ResourceTypeSpotInstancesRequest ResourceType = "spot-instances-request" + ResourceTypeSubnet ResourceType = "subnet" + ResourceTypeTrafficMirrorFilter ResourceType = "traffic-mirror-filter" + ResourceTypeTrafficMirrorSession ResourceType = "traffic-mirror-session" + ResourceTypeTrafficMirrorTarget ResourceType = "traffic-mirror-target" + ResourceTypeTransitGateway ResourceType = "transit-gateway" + ResourceTypeTransitGatewayAttachment ResourceType = "transit-gateway-attachment" + ResourceTypeTransitGatewayConnectPeer ResourceType = "transit-gateway-connect-peer" + ResourceTypeTransitGatewayMulticastDomain ResourceType = "transit-gateway-multicast-domain" + ResourceTypeTransitGatewayRouteTable ResourceType = "transit-gateway-route-table" + ResourceTypeVolume ResourceType = "volume" + ResourceTypeVpc ResourceType = "vpc" + ResourceTypeVpcEndpoint ResourceType = "vpc-endpoint" + ResourceTypeVpcEndpointService ResourceType = "vpc-endpoint-service" + ResourceTypeVpcPeeringConnection ResourceType = "vpc-peering-connection" + ResourceTypeVpnConnection ResourceType = "vpn-connection" + ResourceTypeVpnGateway ResourceType = "vpn-gateway" + ResourceTypeVpcFlowLog ResourceType = "vpc-flow-log" ) // Values returns all known values for ResourceType. Note that this can be expanded @@ -4129,8 +4150,10 @@ const ( // this slice is not guaranteed to be stable across updates. func (ResourceType) Values() []ResourceType { return []ResourceType{ + "capacity-reservation", "client-vpn-endpoint", "customer-gateway", + "carrier-gateway", "dedicated-host", "dhcp-options", "egress-only-internet-gateway", @@ -4147,15 +4170,24 @@ func (ResourceType) Values() []ResourceType { "instance", "instance-event-window", "internet-gateway", + "ipv4pool-ec2", + "ipv6pool-ec2", "key-pair", "launch-template", + "local-gateway", + "local-gateway-route-table", + "local-gateway-virtual-interface", + "local-gateway-virtual-interface-group", "local-gateway-route-table-vpc-association", + "local-gateway-route-table-virtual-interface-group-association", "natgateway", "network-acl", "network-interface", "network-insights-analysis", "network-insights-path", "placement-group", + "prefix-list", + "replace-root-volume-task", "reserved-instances", "route-table", "security-group", @@ -4174,6 +4206,8 @@ func (ResourceType) Values() []ResourceType { "transit-gateway-route-table", "volume", "vpc", + "vpc-endpoint", + "vpc-endpoint-service", "vpc-peering-connection", "vpn-connection", "vpn-gateway", diff --git a/service/ec2/types/types.go b/service/ec2/types/types.go index 1eafbbe3612..597bc77ad7b 100644 --- a/service/ec2/types/types.go +++ b/service/ec2/types/types.go @@ -2643,35 +2643,32 @@ type EventInformation struct { // modify_succeeded - The EC2 Fleet or Spot Fleet request was modified. 
// // * - // price_update - The price for a launch configuration was adjusted because it was - // too high. This change is permanent. + // submitted - The EC2 Fleet or Spot Fleet request is being evaluated and Amazon + // EC2 is preparing to launch the target number of Spot Instances. // - // * submitted - The EC2 Fleet or Spot Fleet - // request is being evaluated and Amazon EC2 is preparing to launch the target - // number of Spot Instances. + // The following + // are the instanceChange events: // - // The following are the instanceChange events: + // * launched - A request was fulfilled and a new + // instance was launched. // - // * - // launched - A request was fulfilled and a new instance was launched. - // - // * - // terminated - An instance was terminated by the user. + // * terminated - An instance was terminated by the + // user. // - // The following are the - // Information events: + // The following are the Information events: // - // * launchSpecTemporarilyBlacklisted - The configuration is - // not valid and several attempts to launch instances have failed. For more - // information, see the description of the event. + // * + // launchSpecTemporarilyBlacklisted - The configuration is not valid and several + // attempts to launch instances have failed. For more information, see the + // description of the event. // - // * launchSpecUnusable - The price - // in a launch specification is not valid because it is below the Spot price or the - // Spot price is above the On-Demand price. + // * launchSpecUnusable - The price in a launch + // specification is not valid because it is below the Spot price or the Spot price + // is above the On-Demand price. // - // * fleetProgressHalted - The price in - // every launch specification is not valid. A launch specification might become - // valid if the Spot price changes. + // * fleetProgressHalted - The price in every launch + // specification is not valid. A launch specification might become valid if the + // Spot price changes. EventSubType *string // The ID of the instance. This information is available only for instanceChange @@ -3952,8 +3949,8 @@ type Image struct { // The location of the AMI. ImageLocation *string - // The AWS account alias (for example, amazon, self) or the AWS account ID of the - // AMI owner. + // The Amazon Web Services account alias (for example, amazon, self) or the Amazon + // Web Services account ID of the AMI owner. ImageOwnerAlias *string // The type of image. @@ -3966,14 +3963,14 @@ type Image struct { // The name of the AMI that was provided during image creation. Name *string - // The AWS account ID of the image owner. + // The ID of the Amazon Web Services account that owns the image. OwnerId *string // This value is set to windows for Windows AMIs; otherwise, it is blank. Platform PlatformValues // The platform details associated with the billing code of the AMI. For more - // information, see Obtaining Billing Information + // information, see Understanding AMI billing // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html) in // the Amazon Elastic Compute Cloud User Guide. PlatformDetails *string @@ -3993,8 +3990,8 @@ type Image struct { // The device name of the root device volume (for example, /dev/sda1). RootDeviceName *string - // The type of root device used by the AMI. The AMI can use an EBS volume or an - // instance store volume. + // The type of root device used by the AMI. The AMI can use an Amazon EBS volume or + // an instance store volume. 
RootDeviceType DeviceType // Specifies whether enhanced networking with the Intel 82599 Virtual Function @@ -4014,12 +4011,15 @@ type Image struct { // The operation of the Amazon EC2 instance and the billing code that is associated // with the AMI. usageOperation corresponds to the lineitem/Operation // (https://docs.aws.amazon.com/cur/latest/userguide/Lineitem-columns.html#Lineitem-details-O-Operation) - // column on your AWS Cost and Usage Report and in the AWS Price List API + // column on your Amazon Web Services Cost and Usage Report and in the Amazon Web + // Services Price List API // (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/price-changes.html). - // For the list of UsageOperation codes, see Platform Details and Usage Operation - // Billing Codes - // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html#billing-info) - // in the Amazon Elastic Compute Cloud User Guide. + // You can view these fields on the Instances or AMIs pages in the Amazon EC2 + // console, or in the responses that are returned by the DescribeImages + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) + // command in the Amazon EC2 API, or the describe-images + // (https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-images.html) + // command in the CLI. UsageOperation *string // The type of virtualization of the AMI. @@ -4536,7 +4536,8 @@ type InstanceCreditSpecification struct { type InstanceCreditSpecificationRequest struct { // The credit option for CPU usage of the instance. Valid values are standard and - // unlimited. + // unlimited. T3 instances with host tenancy do not support the unlimited CPU + // credit option. CpuCredits *string // The ID of the instance. @@ -5619,8 +5620,8 @@ type LaunchPermission struct { // The name of the group. Group PermissionGroup - // The AWS account ID. Constraints: Up to 10 000 account IDs can be specified in a - // single request. + // The Amazon Web Services account ID. Constraints: Up to 10 000 account IDs can be + // specified in a single request. UserId *string noSmithyDocumentSerde @@ -5629,10 +5630,12 @@ type LaunchPermission struct { // Describes a launch permission modification. type LaunchPermissionModifications struct { - // The AWS account ID to add to the list of launch permissions for the AMI. + // The Amazon Web Services account ID to add to the list of launch permissions for + // the AMI. Add []LaunchPermission - // The AWS account ID to remove from the list of launch permissions for the AMI. + // The Amazon Web Services account ID to remove from the list of launch permissions + // for the AMI. Remove []LaunchPermission noSmithyDocumentSerde @@ -6853,7 +6856,7 @@ type ManagedPrefixList struct { // The name of the prefix list. PrefixListName *string - // The state of the prefix list. + // The current state of the prefix list. State PrefixListState // The state message. @@ -7965,7 +7968,8 @@ type Placement struct { // not supported for the ImportInstance // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html) // command. This parameter is not supported by CreateFleet - // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). T3 + // instances that use the unlimited CPU credit option do not support host tenancy. 
Tenancy Tenancy noSmithyDocumentSerde @@ -9276,8 +9280,8 @@ type RunInstancesMonitoringEnabled struct { noSmithyDocumentSerde } -// The tags to apply to the AMI object that will be stored in the S3 bucket. For -// more information, see Categorizing your storage using tags +// The tags to apply to the AMI object that will be stored in the Amazon S3 bucket. +// For more information, see Categorizing your storage using tags // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) in // the Amazon Simple Storage Service User Guide. type S3ObjectTag struct { @@ -9293,13 +9297,13 @@ type S3ObjectTag struct { noSmithyDocumentSerde } -// Describes the storage parameters for S3 and S3 buckets for an instance -// store-backed AMI. +// Describes the storage parameters for Amazon S3 and Amazon S3 buckets for an +// instance store-backed AMI. type S3Storage struct { // The access key ID of the owner of the bucket. Before you specify a value for // your access key ID, review and follow the guidance in Best Practices for - // Managing AWS Access Keys + // Managing Amazon Web Services Access Keys // (https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html). AWSAccessKeyId *string @@ -11034,7 +11038,7 @@ type StoreImageTaskResult struct { // The ID of the AMI that is being stored. AmiId *string - // The name of the S3 bucket that contains the stored AMI object. + // The name of the Amazon S3 bucket that contains the stored AMI object. Bucket *string // The progress of the task as a percentage. @@ -11238,23 +11242,22 @@ type TagDescription struct { // The tags to apply to a resource when the resource is being created. type TagSpecification struct { - // The type of resource to tag. Currently, the resource types that support tagging - // on creation are: capacity-reservation | carrier-gateway | client-vpn-endpoint | - // customer-gateway | dedicated-host | dhcp-options | egress-only-internet-gateway - // | elastic-ip | elastic-gpu | export-image-task | export-instance-task | fleet | - // fpga-image | host-reservation | image| import-image-task | import-snapshot-task - // | instance | instance-event-window | internet-gateway | ipv4pool-ec2 | - // ipv6pool-ec2 | key-pair | launch-template | - // local-gateway-route-table-vpc-association | placement-group | prefix-list | - // natgateway | network-acl | network-interface | reserved-instances |route-table | - // security-group| snapshot | spot-fleet-request | spot-instances-request | - // snapshot | subnet | traffic-mirror-filter | traffic-mirror-session | - // traffic-mirror-target | transit-gateway | transit-gateway-attachment | - // transit-gateway-multicast-domain | transit-gateway-route-table | volume |vpc | - // vpc-peering-connection | vpc-endpoint (for interface and gateway endpoints) | - // vpc-endpoint-service (for Amazon Web Services PrivateLink) | vpc-flow-log | - // vpn-connection | vpn-gateway. To tag a resource after it has been created, see - // CreateTags + // The type of resource to tag on creation. 
The possible values are: + // capacity-reservation | carrier-gateway | client-vpn-endpoint | customer-gateway + // | dedicated-host | dhcp-options | egress-only-internet-gateway | elastic-gpu | + // elastic-ip | export-image-task | export-instance-task | fleet | fpga-image | + // host-reservation | image | import-image-task | import-snapshot-task | instance | + // instance-event-window | internet-gateway | ipv4pool-ec2 | ipv6pool-ec2 | + // key-pair | launch-template | local-gateway-route-table-vpc-association | + // natgateway | network-acl | network-insights-analysis | network-insights-path | + // network-interface | placement-group | prefix-list | reserved-instances | + // route-table | security-group | security-group-rule | snapshot | + // spot-fleet-request | spot-instances-request | subnet | traffic-mirror-filter | + // traffic-mirror-session | traffic-mirror-target | transit-gateway | + // transit-gateway-attachment | transit-gateway-multicast-domain | + // transit-gateway-route-table | volume | vpc | vpc-endpoint | vpc-endpoint-service + // | vpc-flow-log | vpc-peering-connection | vpn-connection | vpn-gateway. To tag a + // resource after it has been created, see CreateTags // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). ResourceType ResourceType @@ -11353,8 +11356,8 @@ type TargetConfigurationRequest struct { // This member is required. OfferingId *string - // The number of instances the Covertible Reserved Instance offering can be applied - // to. This parameter is reserved and cannot be specified in a request + // The number of instances the Convertible Reserved Instance offering can be + // applied to. This parameter is reserved and cannot be specified in a request InstanceCount *int32 noSmithyDocumentSerde diff --git a/service/ecr/api_op_BatchCheckLayerAvailability.go b/service/ecr/api_op_BatchCheckLayerAvailability.go index c4e5172c54b..cea7521f689 100644 --- a/service/ecr/api_op_BatchCheckLayerAvailability.go +++ b/service/ecr/api_op_BatchCheckLayerAvailability.go @@ -44,8 +44,9 @@ type BatchCheckLayerAvailabilityInput struct { // This member is required. RepositoryName *string - // The AWS account ID associated with the registry that contains the image layers - // to check. If you do not specify a registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the image layers to check. If you do not specify a registry, the default + // registry is assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_BatchDeleteImage.go b/service/ecr/api_op_BatchDeleteImage.go index 2712000ccc6..80c9d5b5cf4 100644 --- a/service/ecr/api_op_BatchDeleteImage.go +++ b/service/ecr/api_op_BatchDeleteImage.go @@ -46,8 +46,9 @@ type BatchDeleteImageInput struct { // This member is required. RepositoryName *string - // The AWS account ID associated with the registry that contains the image to - // delete. If you do not specify a registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the image to delete. If you do not specify a registry, the default registry is + // assumed. 
RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_BatchGetImage.go b/service/ecr/api_op_BatchGetImage.go index 36c6f7e0c43..b2724f89a54 100644 --- a/service/ecr/api_op_BatchGetImage.go +++ b/service/ecr/api_op_BatchGetImage.go @@ -48,8 +48,9 @@ type BatchGetImageInput struct { // application/vnd.oci.image.manifest.v1+json AcceptedMediaTypes []string - // The AWS account ID associated with the registry that contains the images to - // describe. If you do not specify a registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the images to describe. If you do not specify a registry, the default registry + // is assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_CompleteLayerUpload.go b/service/ecr/api_op_CompleteLayerUpload.go index 6ef6a040c1e..ac1fe8a370c 100644 --- a/service/ecr/api_op_CompleteLayerUpload.go +++ b/service/ecr/api_op_CompleteLayerUpload.go @@ -50,8 +50,9 @@ type CompleteLayerUploadInput struct { // This member is required. UploadId *string - // The AWS account ID associated with the registry to which to upload layers. If - // you do not specify a registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry to which to + // upload layers. If you do not specify a registry, the default registry is + // assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_CreateRepository.go b/service/ecr/api_op_CreateRepository.go index 0f564ed73f8..a00f130ada5 100644 --- a/service/ecr/api_op_CreateRepository.go +++ b/service/ecr/api_op_CreateRepository.go @@ -11,7 +11,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a repository. For more information, see Amazon ECR Repositories +// Creates a repository. For more information, see Amazon ECR repositories // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html) in // the Amazon Elastic Container Registry User Guide. func (c *Client) CreateRepository(ctx context.Context, params *CreateRepositoryInput, optFns ...func(*Options)) (*CreateRepositoryOutput, error) { diff --git a/service/ecr/api_op_DeleteLifecyclePolicy.go b/service/ecr/api_op_DeleteLifecyclePolicy.go index 785208617df..1f215085fff 100644 --- a/service/ecr/api_op_DeleteLifecyclePolicy.go +++ b/service/ecr/api_op_DeleteLifecyclePolicy.go @@ -34,8 +34,9 @@ type DeleteLifecyclePolicyInput struct { // This member is required. RepositoryName *string - // The AWS account ID associated with the registry that contains the repository. If - // you do not specify a registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the repository. If you do not specify a registry, the default registry is + // assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_DeleteRepository.go b/service/ecr/api_op_DeleteRepository.go index 921615e3f03..525834b915f 100644 --- a/service/ecr/api_op_DeleteRepository.go +++ b/service/ecr/api_op_DeleteRepository.go @@ -38,8 +38,9 @@ type DeleteRepositoryInput struct { // If a repository contains images, forces the deletion. Force bool - // The AWS account ID associated with the registry that contains the repository to - // delete. If you do not specify a registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the repository to delete. 
If you do not specify a registry, the default registry + // is assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_DeleteRepositoryPolicy.go b/service/ecr/api_op_DeleteRepositoryPolicy.go index 3cf256ba6bc..dd23305f84b 100644 --- a/service/ecr/api_op_DeleteRepositoryPolicy.go +++ b/service/ecr/api_op_DeleteRepositoryPolicy.go @@ -34,9 +34,9 @@ type DeleteRepositoryPolicyInput struct { // This member is required. RepositoryName *string - // The AWS account ID associated with the registry that contains the repository - // policy to delete. If you do not specify a registry, the default registry is - // assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the repository policy to delete. If you do not specify a registry, the default + // registry is assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_DescribeImageScanFindings.go b/service/ecr/api_op_DescribeImageScanFindings.go index 392d1013b92..298eaf7eb59 100644 --- a/service/ecr/api_op_DescribeImageScanFindings.go +++ b/service/ecr/api_op_DescribeImageScanFindings.go @@ -61,9 +61,9 @@ type DescribeImageScanFindingsInput struct { // to return. NextToken *string - // The AWS account ID associated with the registry that contains the repository in - // which to describe the image scan findings for. If you do not specify a registry, - // the default registry is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the repository in which to describe the image scan findings for. If you do not + // specify a registry, the default registry is assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_DescribeImages.go b/service/ecr/api_op_DescribeImages.go index 060c4098449..04df99e24c8 100644 --- a/service/ecr/api_op_DescribeImages.go +++ b/service/ecr/api_op_DescribeImages.go @@ -62,9 +62,9 @@ type DescribeImagesInput struct { // This option cannot be used when you specify images with imageIds. NextToken *string - // The AWS account ID associated with the registry that contains the repository in - // which to describe images. If you do not specify a registry, the default registry - // is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the repository in which to describe images. If you do not specify a registry, + // the default registry is assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_DescribeRepositories.go b/service/ecr/api_op_DescribeRepositories.go index 0b04290d329..38f11ed46cd 100644 --- a/service/ecr/api_op_DescribeRepositories.go +++ b/service/ecr/api_op_DescribeRepositories.go @@ -50,9 +50,9 @@ type DescribeRepositoriesInput struct { // purposes. NextToken *string - // The AWS account ID associated with the registry that contains the repositories - // to be described. If you do not specify a registry, the default registry is - // assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the repositories to be described. If you do not specify a registry, the default + // registry is assumed. RegistryId *string // A list of repositories to describe. 
If this parameter is omitted, then all diff --git a/service/ecr/api_op_GetAuthorizationToken.go b/service/ecr/api_op_GetAuthorizationToken.go index 100feb04a06..4d6dfbbd66f 100644 --- a/service/ecr/api_op_GetAuthorizationToken.go +++ b/service/ecr/api_op_GetAuthorizationToken.go @@ -16,8 +16,8 @@ import ( // that your IAM principal has access to. The authorization token is valid for 12 // hours. The authorizationToken returned is a base64 encoded string that can be // decoded and used in a docker login command to authenticate to a registry. The -// AWS CLI offers an get-login-password command that simplifies the login process. -// For more information, see Registry Authentication +// CLI offers a get-login-password command that simplifies the login process. For +// more information, see Registry authentication // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html#registry_auth) // in the Amazon Elastic Container Registry User Guide. func (c *Client) GetAuthorizationToken(ctx context.Context, params *GetAuthorizationTokenInput, optFns ...func(*Options)) (*GetAuthorizationTokenOutput, error) { @@ -37,9 +37,9 @@ func (c *Client) GetAuthorizationToken(ctx context.Context, params *GetAuthoriza type GetAuthorizationTokenInput struct { - // A list of AWS account IDs that are associated with the registries for which to - // get AuthorizationData objects. If you do not specify a registry, the default - // registry is assumed. + // A list of Amazon Web Services account IDs that are associated with the + // registries for which to get AuthorizationData objects. If you do not specify a + // registry, the default registry is assumed. // // Deprecated: This field is deprecated. The returned authorization token can be // used to access any Amazon ECR registry that the IAM principal has access to, diff --git a/service/ecr/api_op_GetDownloadUrlForLayer.go b/service/ecr/api_op_GetDownloadUrlForLayer.go index aeeded26545..82b584fe38b 100644 --- a/service/ecr/api_op_GetDownloadUrlForLayer.go +++ b/service/ecr/api_op_GetDownloadUrlForLayer.go @@ -43,8 +43,9 @@ type GetDownloadUrlForLayerInput struct { // This member is required. RepositoryName *string - // The AWS account ID associated with the registry that contains the image layer to - // download. If you do not specify a registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the image layer to download. If you do not specify a registry, the default + // registry is assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_GetLifecyclePolicy.go b/service/ecr/api_op_GetLifecyclePolicy.go index 9dd0e4d9160..76e8a061ce6 100644 --- a/service/ecr/api_op_GetLifecyclePolicy.go +++ b/service/ecr/api_op_GetLifecyclePolicy.go @@ -34,8 +34,9 @@ type GetLifecyclePolicyInput struct { // This member is required. RepositoryName *string - // The AWS account ID associated with the registry that contains the repository. If - // you do not specify a registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the repository. If you do not specify a registry, the default registry is + // assumed. 
RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_GetLifecyclePolicyPreview.go b/service/ecr/api_op_GetLifecyclePolicyPreview.go index ea42413bbd8..ccf8d59a549 100644 --- a/service/ecr/api_op_GetLifecyclePolicyPreview.go +++ b/service/ecr/api_op_GetLifecyclePolicyPreview.go @@ -67,8 +67,9 @@ type GetLifecyclePolicyPreviewInput struct { // specify images with imageIds. NextToken *string - // The AWS account ID associated with the registry that contains the repository. If - // you do not specify a registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the repository. If you do not specify a registry, the default registry is + // assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_GetRepositoryPolicy.go b/service/ecr/api_op_GetRepositoryPolicy.go index 7d8d1ac40e4..15a292ad6b3 100644 --- a/service/ecr/api_op_GetRepositoryPolicy.go +++ b/service/ecr/api_op_GetRepositoryPolicy.go @@ -33,8 +33,9 @@ type GetRepositoryPolicyInput struct { // This member is required. RepositoryName *string - // The AWS account ID associated with the registry that contains the repository. If - // you do not specify a registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the repository. If you do not specify a registry, the default registry is + // assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_InitiateLayerUpload.go b/service/ecr/api_op_InitiateLayerUpload.go index e76597c2751..d700d833f1e 100644 --- a/service/ecr/api_op_InitiateLayerUpload.go +++ b/service/ecr/api_op_InitiateLayerUpload.go @@ -39,8 +39,9 @@ type InitiateLayerUploadInput struct { // This member is required. RepositoryName *string - // The AWS account ID associated with the registry to which you intend to upload - // layers. If you do not specify a registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry to which you + // intend to upload layers. If you do not specify a registry, the default registry + // is assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_ListImages.go b/service/ecr/api_op_ListImages.go index 542161b16c3..6fda154e28b 100644 --- a/service/ecr/api_op_ListImages.go +++ b/service/ecr/api_op_ListImages.go @@ -60,9 +60,9 @@ type ListImagesInput struct { // retrieve the next items in a list and not for other programmatic purposes. NextToken *string - // The AWS account ID associated with the registry that contains the repository in - // which to list images. If you do not specify a registry, the default registry is - // assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the repository in which to list images. If you do not specify a registry, the + // default registry is assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_PutImage.go b/service/ecr/api_op_PutImage.go index 3649d61beb5..631a9b408c5 100644 --- a/service/ecr/api_op_PutImage.go +++ b/service/ecr/api_op_PutImage.go @@ -57,9 +57,9 @@ type PutImageInput struct { // formats. ImageTag *string - // The AWS account ID associated with the registry that contains the repository in - // which to put the image. If you do not specify a registry, the default registry - // is assumed. 
+ // The Amazon Web Services account ID associated with the registry that contains + // the repository in which to put the image. If you do not specify a registry, the + // default registry is assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_PutImageScanningConfiguration.go b/service/ecr/api_op_PutImageScanningConfiguration.go index 24e33dfdeab..0d92e0b6ba2 100644 --- a/service/ecr/api_op_PutImageScanningConfiguration.go +++ b/service/ecr/api_op_PutImageScanningConfiguration.go @@ -42,9 +42,9 @@ type PutImageScanningConfigurationInput struct { // This member is required. RepositoryName *string - // The AWS account ID associated with the registry that contains the repository in - // which to update the image scanning configuration setting. If you do not specify - // a registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the repository in which to update the image scanning configuration setting. If + // you do not specify a registry, the default registry is assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_PutImageTagMutability.go b/service/ecr/api_op_PutImageTagMutability.go index 71e18eb3455..5e42488a1ad 100644 --- a/service/ecr/api_op_PutImageTagMutability.go +++ b/service/ecr/api_op_PutImageTagMutability.go @@ -12,7 +12,7 @@ import ( ) // Updates the image tag mutability settings for the specified repository. For more -// information, see Image Tag Mutability +// information, see Image tag mutability // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-tag-mutability.html) // in the Amazon Elastic Container Registry User Guide. func (c *Client) PutImageTagMutability(ctx context.Context, params *PutImageTagMutabilityInput, optFns ...func(*Options)) (*PutImageTagMutabilityOutput, error) { @@ -44,9 +44,9 @@ type PutImageTagMutabilityInput struct { // This member is required. RepositoryName *string - // The AWS account ID associated with the registry that contains the repository in - // which to update the image tag mutability settings. If you do not specify a - // registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the repository in which to update the image tag mutability settings. If you do + // not specify a registry, the default registry is assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_PutLifecyclePolicy.go b/service/ecr/api_op_PutLifecyclePolicy.go index 413481cbea9..782382eefe6 100644 --- a/service/ecr/api_op_PutLifecyclePolicy.go +++ b/service/ecr/api_op_PutLifecyclePolicy.go @@ -11,7 +11,7 @@ import ( ) // Creates or updates the lifecycle policy for the specified repository. For more -// information, see Lifecycle Policy Template +// information, see Lifecycle policy template // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html). func (c *Client) PutLifecyclePolicy(ctx context.Context, params *PutLifecyclePolicyInput, optFns ...func(*Options)) (*PutLifecyclePolicyOutput, error) { if params == nil { @@ -40,8 +40,9 @@ type PutLifecyclePolicyInput struct { // This member is required. RepositoryName *string - // The AWS account ID associated with the registry that contains the repository. If - // you do
 not specify a registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the repository. If you do
 not specify a registry, the default registry is + // assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_PutRegistryPolicy.go b/service/ecr/api_op_PutRegistryPolicy.go index 8af85ce511a..b253e0e0fcc 100644 --- a/service/ecr/api_op_PutRegistryPolicy.go +++ b/service/ecr/api_op_PutRegistryPolicy.go @@ -11,9 +11,9 @@ import ( ) // Creates or updates the permissions policy for your registry. A registry policy -// is used to specify permissions for another AWS account and is used when -// configuring cross-account replication. For more information, see Registry -// permissions +// is used to specify permissions for another Amazon Web Services account and is +// used when configuring cross-account replication. For more information, see +// Registry permissions // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/registry-permissions.html) // in the Amazon Elastic Container Registry User Guide. func (c *Client) PutRegistryPolicy(ctx context.Context, params *PutRegistryPolicyInput, optFns ...func(*Options)) (*PutRegistryPolicyOutput, error) { diff --git a/service/ecr/api_op_PutReplicationConfiguration.go b/service/ecr/api_op_PutReplicationConfiguration.go index e51d7347e53..e8e84df6eb0 100644 --- a/service/ecr/api_op_PutReplicationConfiguration.go +++ b/service/ecr/api_op_PutReplicationConfiguration.go @@ -15,7 +15,7 @@ import ( // replication configuration for a repository can be retrieved with the // DescribeRegistry API action. The first time the PutReplicationConfiguration API // is called, a service-linked IAM role is created in your account for the -// replication process. For more information, see Using Service-Linked Roles for +// replication process. For more information, see Using service-linked roles for // Amazon ECR // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/using-service-linked-roles.html) // in the Amazon Elastic Container Registry User Guide. When configuring diff --git a/service/ecr/api_op_SetRepositoryPolicy.go b/service/ecr/api_op_SetRepositoryPolicy.go index 2ec06176daf..1078970c70c 100644 --- a/service/ecr/api_op_SetRepositoryPolicy.go +++ b/service/ecr/api_op_SetRepositoryPolicy.go @@ -11,7 +11,7 @@ import ( ) // Applies a repository policy to the specified repository to control access -// permissions. For more information, see Amazon ECR Repository Policies +// permissions. For more information, see Amazon ECR Repository policies // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html) // in the Amazon Elastic Container Registry User Guide. func (c *Client) SetRepositoryPolicy(ctx context.Context, params *SetRepositoryPolicyInput, optFns ...func(*Options)) (*SetRepositoryPolicyOutput, error) { @@ -32,7 +32,7 @@ func (c *Client) SetRepositoryPolicy(ctx context.Context, params *SetRepositoryP type SetRepositoryPolicyInput struct { // The JSON repository policy text to apply to the repository. For more - // information, see Amazon ECR Repository Policies + // information, see Amazon ECR repository policies // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) // in the Amazon Elastic Container Registry User Guide. // @@ -50,8 +50,9 @@ type SetRepositoryPolicyInput struct { // lock outs. Force bool - // The AWS account ID associated with the registry that contains the repository. If - // you do not specify a registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the repository. 
If you do not specify a registry, the default registry is + // assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_StartImageScan.go b/service/ecr/api_op_StartImageScan.go index 195adff504d..ce76af76d3b 100644 --- a/service/ecr/api_op_StartImageScan.go +++ b/service/ecr/api_op_StartImageScan.go @@ -12,8 +12,8 @@ import ( ) // Starts an image vulnerability scan. An image scan can only be started once per -// day on an individual image. This limit includes if an image was scanned on -// initial push. For more information, see Image Scanning +// 24 hours on an individual image. This limit includes if an image was scanned on +// initial push. For more information, see Image scanning // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html) in // the Amazon Elastic Container Registry User Guide. func (c *Client) StartImageScan(ctx context.Context, params *StartImageScanInput, optFns ...func(*Options)) (*StartImageScanOutput, error) { @@ -43,9 +43,9 @@ type StartImageScanInput struct { // This member is required. RepositoryName *string - // The AWS account ID associated with the registry that contains the repository in - // which to start an image scan request. If you do not specify a registry, the - // default registry is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the repository in which to start an image scan request. If you do not specify a + // registry, the default registry is assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_StartLifecyclePolicyPreview.go b/service/ecr/api_op_StartLifecyclePolicyPreview.go index e5c20142712..1169ac4432a 100644 --- a/service/ecr/api_op_StartLifecyclePolicyPreview.go +++ b/service/ecr/api_op_StartLifecyclePolicyPreview.go @@ -40,8 +40,9 @@ type StartLifecyclePolicyPreviewInput struct { // policy for the repository is used. LifecyclePolicyText *string - // The AWS account ID associated with the registry that contains the repository. If - // you do not specify a registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry that contains + // the repository. If you do not specify a registry, the default registry is + // assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/api_op_UploadLayerPart.go b/service/ecr/api_op_UploadLayerPart.go index 837b4422c7d..4acf7e5b0cc 100644 --- a/service/ecr/api_op_UploadLayerPart.go +++ b/service/ecr/api_op_UploadLayerPart.go @@ -59,8 +59,9 @@ type UploadLayerPartInput struct { // This member is required. UploadId *string - // The AWS account ID associated with the registry to which you are uploading layer - // parts. If you do not specify a registry, the default registry is assumed. + // The Amazon Web Services account ID associated with the registry to which you are + // uploading layer parts. If you do not specify a registry, the default registry is + // assumed. RegistryId *string noSmithyDocumentSerde diff --git a/service/ecr/doc.go b/service/ecr/doc.go index 26ec23c7c8c..602df615b1e 100644 --- a/service/ecr/doc.go +++ b/service/ecr/doc.go @@ -9,5 +9,8 @@ // ECR provides a secure, scalable, and reliable registry for your Docker or Open // Container Initiative (OCI) images. Amazon ECR supports private repositories with // resource-based permissions using IAM so that specific users or Amazon EC2 -// instances can access repositories and images. +// instances can access repositories and images. 
Amazon ECR has service endpoints + // in each supported Region. For more information, see Amazon ECR endpoints + // (https://docs.aws.amazon.com/general/latest/gr/ecr.html) in the Amazon Web + // Services General Reference. package ecr diff --git a/service/ecr/types/errors.go index 06012535602..536f374252e 100644 --- a/service/ecr/types/errors.go +++ b/service/ecr/types/errors.go @@ -361,7 +361,7 @@ func (e *LifecyclePolicyPreviewNotFoundException) ErrorFault() smithy.ErrorFault } // The operation did not succeed because it would have exceeded a service limit for -// your account. For more information, see Amazon ECR Service Quotas +// your account. For more information, see Amazon ECR service quotas // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) in // the Amazon Elastic Container Registry User Guide. type LimitExceededException struct { diff --git a/service/ecr/types/types.go index 5dc4e3fcd11..ec50b2a728b 100644 --- a/service/ecr/types/types.go +++ b/service/ecr/types/types.go @@ -58,36 +58,36 @@ type DescribeImagesFilter struct { // ECR uses server-side encryption with Amazon S3-managed encryption keys which // encrypts your data at rest using an AES-256 encryption algorithm. This does not // require any action on your part. For more control over the encryption of the -// contents of your repository, you can use server-side encryption with customer -// master keys (CMKs) stored in AWS Key Management Service (AWS KMS) to encrypt -// your images. For more information, see Amazon ECR encryption at rest +// contents of your repository, you can use server-side encryption with a Key +// Management Service key stored in Key Management Service (KMS) to encrypt your +// images. For more information, see Amazon ECR encryption at rest // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) // in the Amazon Elastic Container Registry User Guide. type EncryptionConfiguration struct { // The encryption type to use. If you use the KMS encryption type, the contents of - // the repository will be encrypted using server-side encryption with customer - // master keys (CMKs) stored in AWS KMS. When you use AWS KMS to encrypt your data, - // you can either use the default AWS managed CMK for Amazon ECR, or specify your - // own CMK, which you already created. For more information, see Protecting Data - // Using Server-Side Encryption with CMKs Stored in AWS Key Management Service - // (SSE-KMS) + // the repository will be encrypted using server-side encryption with a Key + // Management Service key stored in KMS. When you use KMS to encrypt your data, you + // can either use the default Amazon Web Services managed KMS key for Amazon ECR, + // or specify your own KMS key, which you already created. For more information, + // see Protecting data using server-side encryption with a KMS key stored in Key + // Management Service (SSE-KMS) + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the + // Amazon Simple Storage Service Console Developer Guide. If you use the AES256 + // encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed + // encryption keys which encrypts the images in the repository using an AES-256 + // encryption algorithm. 
For more information, see Protecting data using + // server-side encryption with Amazon S3-managed encryption keys (SSE-S3) // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) // in the Amazon Simple Storage Service Console Developer Guide.. // // This member is required. EncryptionType EncryptionType - // If you use the KMS encryption type, specify the CMK to use for encryption. The - // alias, key ID, or full ARN of the CMK can be specified. The key must exist in - // the same Region as the repository. If no key is specified, the default AWS - // managed CMK for Amazon ECR will be used. + // If you use the KMS encryption type, specify the KMS key to use for encryption. + // The alias, key ID, or full ARN of the KMS key can be specified. The key must + // exist in the same Region as the repository. If no key is specified, the default + // Amazon Web Services managed KMS key for Amazon ECR will be used. KmsKey *string noSmithyDocumentSerde @@ -105,7 +105,8 @@ type Image struct { // The manifest media type of the image. ImageManifestMediaType *string - // The AWS account ID associated with the registry containing the image. + // The Amazon Web Services account ID associated with the registry containing the + // image. RegistryId *string // The name of the repository associated with the image. @@ -147,7 +148,8 @@ type ImageDetail struct { // The list of tags associated with this image. ImageTags []string - // The AWS account ID associated with the registry to which this image belongs. + // The Amazon Web Services account ID associated with the registry to which this + // image belongs. RegistryId *string // The name of the repository to which this image belongs. @@ -243,7 +245,9 @@ type ImageScanningConfiguration struct { // The setting that determines whether images are scanned after being pushed to a // repository. If set to true, images will be scanned after being pushed. If this // parameter is not specified, it will default to false and images will not be - // scanned unless a scan is manually started with the StartImageScan API. + // scanned unless a scan is manually started with the API_StartImageScan + // (https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_StartImageScan.html) + // API. ScanOnPush bool noSmithyDocumentSerde @@ -413,13 +417,14 @@ type Repository struct { // The tag mutability setting for the repository. ImageTagMutability ImageTagMutability - // The AWS account ID associated with the registry that contains the repository. + // The Amazon Web Services account ID associated with the registry that contains + // the repository. RegistryId *string // The Amazon Resource Name (ARN) that identifies the repository. The ARN contains - // the arn:aws:ecr namespace, followed by the region of the repository, AWS account - // ID of the repository owner, repository namespace, and repository name. For - // example, arn:aws:ecr:region:012345678910:repository/test. + // the arn:aws:ecr namespace, followed by the region of the repository, Amazon Web + // Services account ID of the repository owner, repository namespace, and + // repository name. For example, arn:aws:ecr:region:012345678910:repository/test. RepositoryArn *string // The name of the repository. 
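The RegistryId and EncryptionConfiguration parameters reworded above are passed as plain struct fields on the ECR client inputs. The sketch below shows one way they might be used; the repository name, KMS key alias, and account ID are placeholders rather than values taken from this changeset.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ecr"
	"github.com/aws/aws-sdk-go-v2/service/ecr/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := ecr.NewFromConfig(cfg)

	// Create a repository encrypted with a customer-managed KMS key; when
	// KmsKey is omitted, the Amazon Web Services managed key for Amazon ECR
	// is used instead.
	if _, err := client.CreateRepository(context.TODO(), &ecr.CreateRepositoryInput{
		RepositoryName: aws.String("example-repo"), // placeholder
		EncryptionConfiguration: &types.EncryptionConfiguration{
			EncryptionType: types.EncryptionTypeKms,
			KmsKey:         aws.String("alias/example-key"), // placeholder
		},
		ImageScanningConfiguration: &types.ImageScanningConfiguration{ScanOnPush: true},
	}); err != nil {
		log.Fatal(err)
	}

	// List repositories in another account's registry by passing its Amazon
	// Web Services account ID; leaving RegistryId nil targets the default
	// registry, as the documentation above notes.
	if _, err := client.DescribeRepositories(context.TODO(), &ecr.DescribeRepositoriesInput{
		RegistryId: aws.String("123456789012"), // placeholder account ID
	}); err != nil {
		log.Fatal(err)
	}
}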
diff --git a/service/iot/deserializers.go b/service/iot/deserializers.go index e538f4dc9fe..b06e3c27913 100644 --- a/service/iot/deserializers.go +++ b/service/iot/deserializers.go @@ -36919,6 +36919,11 @@ func awsRestjson1_deserializeDocumentAction(v **types.Action, value interface{}) return err } + case "openSearch": + if err := awsRestjson1_deserializeDocumentOpenSearchAction(&sv.OpenSearch, value); err != nil { + return err + } + case "republish": if err := awsRestjson1_deserializeDocumentRepublishAction(&sv.Republish, value); err != nil { return err @@ -46261,6 +46266,82 @@ func awsRestjson1_deserializeDocumentNumberList(v *[]float64, value interface{}) return nil } +func awsRestjson1_deserializeDocumentOpenSearchAction(v **types.OpenSearchAction, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.OpenSearchAction + if *v == nil { + sv = &types.OpenSearchAction{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "endpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ElasticsearchEndpoint to be of type string, got %T instead", value) + } + sv.Endpoint = ptr.String(jtv) + } + + case "id": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ElasticsearchId to be of type string, got %T instead", value) + } + sv.Id = ptr.String(jtv) + } + + case "index": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ElasticsearchIndex to be of type string, got %T instead", value) + } + sv.Index = ptr.String(jtv) + } + + case "roleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AwsArn to be of type string, got %T instead", value) + } + sv.RoleArn = ptr.String(jtv) + } + + case "type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ElasticsearchType to be of type string, got %T instead", value) + } + sv.Type = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentOTAUpdateFile(v **types.OTAUpdateFile, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/iot/serializers.go b/service/iot/serializers.go index 68b90d50890..e3964487cfa 100644 --- a/service/iot/serializers.go +++ b/service/iot/serializers.go @@ -17075,6 +17075,13 @@ func awsRestjson1_serializeDocumentAction(v *types.Action, value smithyjson.Valu } } + if v.OpenSearch != nil { + ok := object.Key("openSearch") + if err := awsRestjson1_serializeDocumentOpenSearchAction(v.OpenSearch, ok); err != nil { + return err + } + } + if v.Republish != nil { ok := object.Key("republish") if err := awsRestjson1_serializeDocumentRepublishAction(v.Republish, ok); err != nil { @@ -18907,6 +18914,38 @@ func awsRestjson1_serializeDocumentNumberList(v []float64, value smithyjson.Valu return nil } +func awsRestjson1_serializeDocumentOpenSearchAction(v *types.OpenSearchAction, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Endpoint != nil { + ok := object.Key("endpoint") + ok.String(*v.Endpoint) + } + + if v.Id != nil { + ok := object.Key("id") + ok.String(*v.Id) + } + + if v.Index != nil { + ok := object.Key("index") + 
ok.String(*v.Index) + } + + if v.RoleArn != nil { + ok := object.Key("roleArn") + ok.String(*v.RoleArn) + } + + if v.Type != nil { + ok := object.Key("type") + ok.String(*v.Type) + } + + return nil +} + func awsRestjson1_serializeDocumentOTAUpdateFile(v *types.OTAUpdateFile, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/service/iot/types/types.go b/service/iot/types/types.go index 307f925c8ad..8546136389c 100644 --- a/service/iot/types/types.go +++ b/service/iot/types/types.go @@ -67,7 +67,10 @@ type Action struct { // DynamoDB column. DynamoDBv2 *DynamoDBv2Action - // Write data to an Amazon Elasticsearch Service domain. + // Write data to an Amazon Elasticsearch Service domain. This action is deprecated. + // Use the OpenSearch action + // (https://docs.aws.amazon.com/iot/latest/apireference/API_OpenSearchAction.html) + // instead. Elasticsearch *ElasticsearchAction // Write to an Amazon Kinesis Firehose stream. @@ -96,6 +99,9 @@ type Action struct { // Invoke a Lambda function. Lambda *LambdaAction + // Write data to an Amazon OpenSearch Service domain. + OpenSearch *OpenSearchAction + // Publish to another MQTT topic. Republish *RepublishAction @@ -1410,6 +1416,9 @@ type EffectivePolicy struct { } // Describes an action that writes data to an Amazon Elasticsearch Service domain. +// This action is deprecated. Use the OpenSearch action +// (https://docs.aws.amazon.com/iot/latest/apireference/API_OpenSearchAction.html) +// instead. type ElasticsearchAction struct { // The endpoint of your Elasticsearch domain. @@ -2328,6 +2337,37 @@ type NonCompliantResource struct { noSmithyDocumentSerde } +// Describes an action that writes data to an Amazon OpenSearch Service domain. +type OpenSearchAction struct { + + // The endpoint of your OpenSearch domain. + // + // This member is required. + Endpoint *string + + // The unique identifier for the document you are storing. + // + // This member is required. + Id *string + + // The OpenSearch index where you want to store your data. + // + // This member is required. + Index *string + + // The IAM role ARN that has access to OpenSearch. + // + // This member is required. + RoleArn *string + + // The type of document you are storing. + // + // This member is required. + Type *string + + noSmithyDocumentSerde +} + // Describes a file to be associated with an OTA update. type OTAUpdateFile struct { @@ -3261,7 +3301,8 @@ type ThingConnectivity struct { // false if it is not connected. Connected bool - // The reason why the client is disconnected. + // The reason why the client is disconnected. If the thing has been disconnected + // for approximately an hour, the disconnectReason value might be missing. DisconnectReason *string // The epoch time (in milliseconds) when the thing last connected or disconnected. 
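To show how the new OpenSearchAction and its required fields (enforced by validateOpenSearchAction below) fit together, here is a rough sketch of creating a topic rule that targets an OpenSearch Service domain; the rule name, SQL statement, endpoint, index, and role ARN are all placeholders.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/iot"
	"github.com/aws/aws-sdk-go-v2/service/iot/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := iot.NewFromConfig(cfg)

	// Route matching MQTT messages to an OpenSearch Service domain. Endpoint,
	// Id, Index, Type, and RoleArn are all required members of the action.
	if _, err := client.CreateTopicRule(context.TODO(), &iot.CreateTopicRuleInput{
		RuleName: aws.String("exampleOpenSearchRule"), // placeholder
		TopicRulePayload: &types.TopicRulePayload{
			Sql: aws.String("SELECT * FROM 'sensors/+/telemetry'"), // placeholder topic filter
			Actions: []types.Action{
				{
					OpenSearch: &types.OpenSearchAction{
						Endpoint: aws.String("https://search-example.us-east-1.es.amazonaws.com"), // placeholder
						Id:       aws.String("${newuuid()}"), // substitution template for a unique document ID
						Index:    aws.String("telemetry"),    // placeholder index
						Type:     aws.String("_doc"),
						RoleArn:  aws.String("arn:aws:iam::123456789012:role/example-iot-opensearch"), // placeholder
					},
				},
			},
		},
	}); err != nil {
		log.Fatal(err)
	}
}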
diff --git a/service/iot/validators.go b/service/iot/validators.go index c44b6457741..0d03ba29fea 100644 --- a/service/iot/validators.go +++ b/service/iot/validators.go @@ -4495,6 +4495,11 @@ func validateAction(v *types.Action) error { invalidParams.AddNested("Kafka", err.(smithy.InvalidParamsError)) } } + if v.OpenSearch != nil { + if err := validateOpenSearchAction(v.OpenSearch); err != nil { + invalidParams.AddNested("OpenSearch", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -5363,6 +5368,33 @@ func validateMitigationActionParams(v *types.MitigationActionParams) error { } } +func validateOpenSearchAction(v *types.OpenSearchAction) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "OpenSearchAction"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.Endpoint == nil { + invalidParams.Add(smithy.NewErrParamRequired("Endpoint")) + } + if v.Index == nil { + invalidParams.Add(smithy.NewErrParamRequired("Index")) + } + if v.Type == nil { + invalidParams.Add(smithy.NewErrParamRequired("Type")) + } + if v.Id == nil { + invalidParams.Add(smithy.NewErrParamRequired("Id")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateProvisioningHook(v *types.ProvisioningHook) error { if v == nil { return nil diff --git a/service/kafkaconnect/LICENSE.txt b/service/kafkaconnect/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/service/kafkaconnect/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/service/kafkaconnect/api_client.go b/service/kafkaconnect/api_client.go new file mode 100644 index 00000000000..97549fd2ca0 --- /dev/null +++ b/service/kafkaconnect/api_client.go @@ -0,0 +1,262 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kafkaconnect + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "time" +) + +const ServiceID = "KafkaConnect" +const ServiceAPIVersion = "2021-09-14" + +// Client provides the API client to make operations call for Managed Streaming for +// Kafka Connect. 
+type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveDefaultEndpointConfiguration(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + EndpointResolver EndpointResolver + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. + Retryer aws.Retryer + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// WithEndpointResolver returns a functional option for setting the Client's +// EndpointResolver option. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied. 
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + if o.HTTPClient != nil { + return + } + o.HTTPClient = awshttp.NewBuildableClient() +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + o.Retryer = retry.NewStandard() +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack) error { + return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "kafkaconnect", goModuleVersion)(stack) +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/service/kafkaconnect/api_op_CreateConnector.go b/service/kafkaconnect/api_op_CreateConnector.go new file mode 100644 index 00000000000..1496221b4b8 --- /dev/null +++ b/service/kafkaconnect/api_op_CreateConnector.go @@ -0,0 +1,182 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kafkaconnect + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a connector using the specified properties. 
+func (c *Client) CreateConnector(ctx context.Context, params *CreateConnectorInput, optFns ...func(*Options)) (*CreateConnectorOutput, error) { + if params == nil { + params = &CreateConnectorInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateConnector", params, optFns, c.addOperationCreateConnectorMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateConnectorOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateConnectorInput struct { + + // Information about the capacity allocated to the connector. Exactly one of the + // two properties must be specified. + // + // This member is required. + Capacity *types.Capacity + + // A map of keys to values that represent the configuration for the connector. + // + // This member is required. + ConnectorConfiguration map[string]string + + // The name of the connector. + // + // This member is required. + ConnectorName *string + + // Specifies which Apache Kafka cluster to connect to. + // + // This member is required. + KafkaCluster *types.KafkaCluster + + // Details of the client authentication used by the Apache Kafka cluster. + // + // This member is required. + KafkaClusterClientAuthentication *types.KafkaClusterClientAuthentication + + // Details of encryption in transit to the Apache Kafka cluster. + // + // This member is required. + KafkaClusterEncryptionInTransit *types.KafkaClusterEncryptionInTransit + + // The version of Kafka Connect. It has to be compatible with both the Apache Kafka + // cluster's version and the plugins. + // + // This member is required. + KafkaConnectVersion *string + + // Specifies which plugins to use for the connector. + // + // This member is required. + Plugins []types.Plugin + + // The Amazon Resource Name (ARN) of the IAM role used by the connector to access + // the Amazon Web Services resources that it needs. The types of resources depends + // on the logic of the connector. For example, a connector that has Amazon S3 as a + // destination must have permissions that allow it to write to the S3 destination + // bucket. + // + // This member is required. + ServiceExecutionRoleArn *string + + // A summary description of the connector. + ConnectorDescription *string + + // Details about log delivery. + LogDelivery *types.LogDelivery + + // Specifies which worker configuration to use with the connector. + WorkerConfiguration *types.WorkerConfiguration + + noSmithyDocumentSerde +} + +type CreateConnectorOutput struct { + + // The Amazon Resource Name (ARN) that Amazon assigned to the connector. + ConnectorArn *string + + // The name of the connector. + ConnectorName *string + + // The state of the connector. + ConnectorState types.ConnectorState + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateConnectorMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateConnector{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateConnector{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateConnectorValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateConnector(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateConnector(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kafkaconnect", + OperationName: "CreateConnector", + } +} diff --git a/service/kafkaconnect/api_op_CreateCustomPlugin.go b/service/kafkaconnect/api_op_CreateCustomPlugin.go new file mode 100644 index 00000000000..f50132e291e --- /dev/null +++ b/service/kafkaconnect/api_op_CreateCustomPlugin.go @@ -0,0 +1,143 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kafkaconnect + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a custom plugin using the specified properties. +func (c *Client) CreateCustomPlugin(ctx context.Context, params *CreateCustomPluginInput, optFns ...func(*Options)) (*CreateCustomPluginOutput, error) { + if params == nil { + params = &CreateCustomPluginInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateCustomPlugin", params, optFns, c.addOperationCreateCustomPluginMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateCustomPluginOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateCustomPluginInput struct { + + // The type of the plugin file. + // + // This member is required. 
+ ContentType types.CustomPluginContentType + + // Information about the location of a custom plugin. + // + // This member is required. + Location *types.CustomPluginLocation + + // The name of the custom plugin. + // + // This member is required. + Name *string + + // A summary description of the custom plugin. + Description *string + + noSmithyDocumentSerde +} + +type CreateCustomPluginOutput struct { + + // The Amazon Resource Name (ARN) that Amazon assigned to the custom plugin. + CustomPluginArn *string + + // The state of the custom plugin. + CustomPluginState types.CustomPluginState + + // The name of the custom plugin. + Name *string + + // The revision of the custom plugin. + Revision int64 + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateCustomPluginMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateCustomPlugin{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateCustomPlugin{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateCustomPluginValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateCustomPlugin(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateCustomPlugin(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kafkaconnect", + OperationName: "CreateCustomPlugin", + } +} diff --git a/service/kafkaconnect/api_op_CreateWorkerConfiguration.go b/service/kafkaconnect/api_op_CreateWorkerConfiguration.go new file mode 100644 index 00000000000..9dd113335f5 --- /dev/null +++ b/service/kafkaconnect/api_op_CreateWorkerConfiguration.go @@ -0,0 +1,139 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package kafkaconnect + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Creates a worker configuration using the specified properties. +func (c *Client) CreateWorkerConfiguration(ctx context.Context, params *CreateWorkerConfigurationInput, optFns ...func(*Options)) (*CreateWorkerConfigurationOutput, error) { + if params == nil { + params = &CreateWorkerConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateWorkerConfiguration", params, optFns, c.addOperationCreateWorkerConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateWorkerConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateWorkerConfigurationInput struct { + + // The name of the worker configuration. + // + // This member is required. + Name *string + + // Base64 encoded contents of connect-distributed.properties file. + // + // This member is required. + PropertiesFileContent *string + + // A summary description of the worker configuration. + Description *string + + noSmithyDocumentSerde +} + +type CreateWorkerConfigurationOutput struct { + + // The time that the worker configuration was created. + CreationTime *time.Time + + // The latest revision of the worker configuration. + LatestRevision *types.WorkerConfigurationRevisionSummary + + // The name of the worker configuration. + Name *string + + // The Amazon Resource Name (ARN) that Amazon assigned to the worker configuration. + WorkerConfigurationArn *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateWorkerConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateWorkerConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateWorkerConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateWorkerConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateWorkerConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateWorkerConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kafkaconnect", + OperationName: "CreateWorkerConfiguration", + } +} diff --git a/service/kafkaconnect/api_op_DeleteConnector.go b/service/kafkaconnect/api_op_DeleteConnector.go new file mode 100644 index 00000000000..2abb41305c4 --- /dev/null +++ b/service/kafkaconnect/api_op_DeleteConnector.go @@ -0,0 +1,127 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kafkaconnect + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the specified connector. +func (c *Client) DeleteConnector(ctx context.Context, params *DeleteConnectorInput, optFns ...func(*Options)) (*DeleteConnectorOutput, error) { + if params == nil { + params = &DeleteConnectorInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteConnector", params, optFns, c.addOperationDeleteConnectorMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteConnectorOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteConnectorInput struct { + + // The Amazon Resource Name (ARN) of the connector that you want to delete. 
+ // + // This member is required. + ConnectorArn *string + + // The current version of the connector that you want to delete. + CurrentVersion *string + + noSmithyDocumentSerde +} + +type DeleteConnectorOutput struct { + + // The Amazon Resource Name (ARN) of the connector that you requested to delete. + ConnectorArn *string + + // The state of the connector that you requested to delete. + ConnectorState types.ConnectorState + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteConnectorMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpDeleteConnector{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpDeleteConnector{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteConnectorValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteConnector(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteConnector(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kafkaconnect", + OperationName: "DeleteConnector", + } +} diff --git a/service/kafkaconnect/api_op_DescribeConnector.go b/service/kafkaconnect/api_op_DescribeConnector.go new file mode 100644 index 00000000000..0e17b3f4e1d --- /dev/null +++ b/service/kafkaconnect/api_op_DescribeConnector.go @@ -0,0 +1,171 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kafkaconnect + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Returns summary information about the connector. 
+func (c *Client) DescribeConnector(ctx context.Context, params *DescribeConnectorInput, optFns ...func(*Options)) (*DescribeConnectorOutput, error) { + if params == nil { + params = &DescribeConnectorInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeConnector", params, optFns, c.addOperationDescribeConnectorMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeConnectorOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeConnectorInput struct { + + // The Amazon Resource Name (ARN) of the connector that you want to describe. + // + // This member is required. + ConnectorArn *string + + noSmithyDocumentSerde +} + +type DescribeConnectorOutput struct { + + // Information about the capacity of the connector, whether it is auto scaled or + // provisioned. + Capacity *types.CapacityDescription + + // The Amazon Resource Name (ARN) of the connector. + ConnectorArn *string + + // A map of keys to values that represent the configuration for the connector. + ConnectorConfiguration map[string]string + + // A summary description of the connector. + ConnectorDescription *string + + // The name of the connector. + ConnectorName *string + + // The state of the connector. + ConnectorState types.ConnectorState + + // The time the connector was created. + CreationTime *time.Time + + // The current version of the connector. + CurrentVersion *string + + // The Apache Kafka cluster that the connector is connected to. + KafkaCluster *types.KafkaClusterDescription + + // The type of client authentication used to connect to the Apache Kafka cluster. + // The value is NONE when no client authentication is used. + KafkaClusterClientAuthentication *types.KafkaClusterClientAuthenticationDescription + + // Details of encryption in transit to the Apache Kafka cluster. + KafkaClusterEncryptionInTransit *types.KafkaClusterEncryptionInTransitDescription + + // The version of Kafka Connect. It has to be compatible with both the Apache Kafka + // cluster's version and the plugins. + KafkaConnectVersion *string + + // Details about delivering logs to Amazon CloudWatch Logs. + LogDelivery *types.LogDeliveryDescription + + // Specifies which plugins were used for this connector. + Plugins []types.PluginDescription + + // The Amazon Resource Name (ARN) of the IAM role used by the connector to access + // Amazon Web Services resources. + ServiceExecutionRoleArn *string + + // Specifies which worker configuration was used for the connector. + WorkerConfiguration *types.WorkerConfigurationDescription + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeConnectorMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpDescribeConnector{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpDescribeConnector{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeConnectorValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeConnector(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeConnector(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kafkaconnect", + OperationName: "DescribeConnector", + } +} diff --git a/service/kafkaconnect/api_op_DescribeCustomPlugin.go b/service/kafkaconnect/api_op_DescribeCustomPlugin.go new file mode 100644 index 00000000000..04233968cd9 --- /dev/null +++ b/service/kafkaconnect/api_op_DescribeCustomPlugin.go @@ -0,0 +1,138 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kafkaconnect + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// A summary description of the custom plugin. +func (c *Client) DescribeCustomPlugin(ctx context.Context, params *DescribeCustomPluginInput, optFns ...func(*Options)) (*DescribeCustomPluginOutput, error) { + if params == nil { + params = &DescribeCustomPluginInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeCustomPlugin", params, optFns, c.addOperationDescribeCustomPluginMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeCustomPluginOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeCustomPluginInput struct { + + // Returns information about a custom plugin. 
+ // + // This member is required. + CustomPluginArn *string + + noSmithyDocumentSerde +} + +type DescribeCustomPluginOutput struct { + + // The time that the custom plugin was created. + CreationTime *time.Time + + // The Amazon Resource Name (ARN) of the custom plugin. + CustomPluginArn *string + + // The state of the custom plugin. + CustomPluginState types.CustomPluginState + + // The description of the custom plugin. + Description *string + + // The latest successfully created revision of the custom plugin. If there are no + // successfully created revisions, this field will be absent. + LatestRevision *types.CustomPluginRevisionSummary + + // The name of the custom plugin. + Name *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeCustomPluginMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpDescribeCustomPlugin{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpDescribeCustomPlugin{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeCustomPluginValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCustomPlugin(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeCustomPlugin(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kafkaconnect", + OperationName: "DescribeCustomPlugin", + } +} diff --git a/service/kafkaconnect/api_op_DescribeWorkerConfiguration.go b/service/kafkaconnect/api_op_DescribeWorkerConfiguration.go new file mode 100644 index 00000000000..23dc294c13e --- /dev/null +++ b/service/kafkaconnect/api_op_DescribeWorkerConfiguration.go @@ -0,0 +1,135 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package kafkaconnect + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Returns information about a worker configuration. +func (c *Client) DescribeWorkerConfiguration(ctx context.Context, params *DescribeWorkerConfigurationInput, optFns ...func(*Options)) (*DescribeWorkerConfigurationOutput, error) { + if params == nil { + params = &DescribeWorkerConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeWorkerConfiguration", params, optFns, c.addOperationDescribeWorkerConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeWorkerConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeWorkerConfigurationInput struct { + + // The Amazon Resource Name (ARN) of the worker configuration that you want to get + // information about. + // + // This member is required. + WorkerConfigurationArn *string + + noSmithyDocumentSerde +} + +type DescribeWorkerConfigurationOutput struct { + + // The time that the worker configuration was created. + CreationTime *time.Time + + // The description of the worker configuration. + Description *string + + // The latest revision of the custom configuration. + LatestRevision *types.WorkerConfigurationRevisionDescription + + // The name of the worker configuration. + Name *string + + // The Amazon Resource Name (ARN) of the custom configuration. + WorkerConfigurationArn *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeWorkerConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpDescribeWorkerConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpDescribeWorkerConfiguration{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeWorkerConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeWorkerConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err 
!= nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeWorkerConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kafkaconnect", + OperationName: "DescribeWorkerConfiguration", + } +} diff --git a/service/kafkaconnect/api_op_ListConnectors.go b/service/kafkaconnect/api_op_ListConnectors.go new file mode 100644 index 00000000000..0634546d709 --- /dev/null +++ b/service/kafkaconnect/api_op_ListConnectors.go @@ -0,0 +1,214 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kafkaconnect + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of all the connectors in this account and Region. The list is +// limited to connectors whose name starts with the specified prefix. The response +// also includes a description of each of the listed connectors. +func (c *Client) ListConnectors(ctx context.Context, params *ListConnectorsInput, optFns ...func(*Options)) (*ListConnectorsOutput, error) { + if params == nil { + params = &ListConnectorsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListConnectors", params, optFns, c.addOperationListConnectorsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListConnectorsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListConnectorsInput struct { + + // The name prefix that you want to use to search for and list connectors. + ConnectorNamePrefix *string + + // The maximum number of connectors to list in one response. + MaxResults int32 + + // If the response of a ListConnectors operation is truncated, it will include a + // NextToken. Send this NextToken in a subsequent request to continue listing from + // where the previous operation left off. + NextToken *string + + noSmithyDocumentSerde +} + +type ListConnectorsOutput struct { + + // An array of connector descriptions. + Connectors []types.ConnectorSummary + + // If the response of a ListConnectors operation is truncated, it will include a + // NextToken. Send this NextToken in a subsequent request to continue listing from + // where it left off. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListConnectorsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListConnectors{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListConnectors{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListConnectors(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListConnectorsAPIClient is a client that implements the ListConnectors +// operation. +type ListConnectorsAPIClient interface { + ListConnectors(context.Context, *ListConnectorsInput, ...func(*Options)) (*ListConnectorsOutput, error) +} + +var _ ListConnectorsAPIClient = (*Client)(nil) + +// ListConnectorsPaginatorOptions is the paginator options for ListConnectors +type ListConnectorsPaginatorOptions struct { + // The maximum number of connectors to list in one response. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListConnectorsPaginator is a paginator for ListConnectors +type ListConnectorsPaginator struct { + options ListConnectorsPaginatorOptions + client ListConnectorsAPIClient + params *ListConnectorsInput + nextToken *string + firstPage bool +} + +// NewListConnectorsPaginator returns a new ListConnectorsPaginator +func NewListConnectorsPaginator(client ListConnectorsAPIClient, params *ListConnectorsInput, optFns ...func(*ListConnectorsPaginatorOptions)) *ListConnectorsPaginator { + if params == nil { + params = &ListConnectorsInput{} + } + + options := ListConnectorsPaginatorOptions{} + if params.MaxResults != 0 { + options.Limit = params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListConnectorsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListConnectorsPaginator) HasMorePages() bool { + return p.firstPage || p.nextToken != nil +} + +// NextPage retrieves the next ListConnectors page. +func (p *ListConnectorsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListConnectorsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + params.MaxResults = p.options.Limit + + result, err := p.client.ListConnectors(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListConnectors(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kafkaconnect", + OperationName: "ListConnectors", + } +} diff --git a/service/kafkaconnect/api_op_ListCustomPlugins.go b/service/kafkaconnect/api_op_ListCustomPlugins.go new file mode 100644 index 00000000000..84ea6eeac6d --- /dev/null +++ b/service/kafkaconnect/api_op_ListCustomPlugins.go @@ -0,0 +1,209 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kafkaconnect + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of all of the custom plugins in this account and Region. +func (c *Client) ListCustomPlugins(ctx context.Context, params *ListCustomPluginsInput, optFns ...func(*Options)) (*ListCustomPluginsOutput, error) { + if params == nil { + params = &ListCustomPluginsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListCustomPlugins", params, optFns, c.addOperationListCustomPluginsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListCustomPluginsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListCustomPluginsInput struct { + + // The maximum number of custom plugins to list in one response. + MaxResults int32 + + // If the response of a ListCustomPlugins operation is truncated, it will include a + // NextToken. 
Send this NextToken in a subsequent request to continue listing from + // where the previous operation left off. + NextToken *string + + noSmithyDocumentSerde +} + +type ListCustomPluginsOutput struct { + + // An array of custom plugin descriptions. + CustomPlugins []types.CustomPluginSummary + + // If the response of a ListCustomPlugins operation is truncated, it will include a + // NextToken. Send this NextToken in a subsequent request to continue listing from + // where the previous operation left off. + NextToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListCustomPluginsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListCustomPlugins{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListCustomPlugins{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListCustomPlugins(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListCustomPluginsAPIClient is a client that implements the ListCustomPlugins +// operation. +type ListCustomPluginsAPIClient interface { + ListCustomPlugins(context.Context, *ListCustomPluginsInput, ...func(*Options)) (*ListCustomPluginsOutput, error) +} + +var _ ListCustomPluginsAPIClient = (*Client)(nil) + +// ListCustomPluginsPaginatorOptions is the paginator options for ListCustomPlugins +type ListCustomPluginsPaginatorOptions struct { + // The maximum number of custom plugins to list in one response. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListCustomPluginsPaginator is a paginator for ListCustomPlugins +type ListCustomPluginsPaginator struct { + options ListCustomPluginsPaginatorOptions + client ListCustomPluginsAPIClient + params *ListCustomPluginsInput + nextToken *string + firstPage bool +} + +// NewListCustomPluginsPaginator returns a new ListCustomPluginsPaginator +func NewListCustomPluginsPaginator(client ListCustomPluginsAPIClient, params *ListCustomPluginsInput, optFns ...func(*ListCustomPluginsPaginatorOptions)) *ListCustomPluginsPaginator { + if params == nil { + params = &ListCustomPluginsInput{} + } + + options := ListCustomPluginsPaginatorOptions{} + if params.MaxResults != 0 { + options.Limit = params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListCustomPluginsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListCustomPluginsPaginator) HasMorePages() bool { + return p.firstPage || p.nextToken != nil +} + +// NextPage retrieves the next ListCustomPlugins page. +func (p *ListCustomPluginsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListCustomPluginsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + params.MaxResults = p.options.Limit + + result, err := p.client.ListCustomPlugins(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListCustomPlugins(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kafkaconnect", + OperationName: "ListCustomPlugins", + } +} diff --git a/service/kafkaconnect/api_op_ListWorkerConfigurations.go b/service/kafkaconnect/api_op_ListWorkerConfigurations.go new file mode 100644 index 00000000000..f8050bbb617 --- /dev/null +++ b/service/kafkaconnect/api_op_ListWorkerConfigurations.go @@ -0,0 +1,211 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kafkaconnect + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of all of the worker configurations in this account and Region. +func (c *Client) ListWorkerConfigurations(ctx context.Context, params *ListWorkerConfigurationsInput, optFns ...func(*Options)) (*ListWorkerConfigurationsOutput, error) { + if params == nil { + params = &ListWorkerConfigurationsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListWorkerConfigurations", params, optFns, c.addOperationListWorkerConfigurationsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListWorkerConfigurationsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListWorkerConfigurationsInput struct { + + // The maximum number of worker configurations to list in one response. 
+ MaxResults int32 + + // If the response of a ListWorkerConfigurations operation is truncated, it will + // include a NextToken. Send this NextToken in a subsequent request to continue + // listing from where the previous operation left off. + NextToken *string + + noSmithyDocumentSerde +} + +type ListWorkerConfigurationsOutput struct { + + // If the response of a ListWorkerConfigurations operation is truncated, it will + // include a NextToken. Send this NextToken in a subsequent request to continue + // listing from where the previous operation left off. + NextToken *string + + // An array of worker configuration descriptions. + WorkerConfigurations []types.WorkerConfigurationSummary + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListWorkerConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListWorkerConfigurations{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListWorkerConfigurations{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListWorkerConfigurations(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListWorkerConfigurationsAPIClient is a client that implements the +// ListWorkerConfigurations operation. +type ListWorkerConfigurationsAPIClient interface { + ListWorkerConfigurations(context.Context, *ListWorkerConfigurationsInput, ...func(*Options)) (*ListWorkerConfigurationsOutput, error) +} + +var _ ListWorkerConfigurationsAPIClient = (*Client)(nil) + +// ListWorkerConfigurationsPaginatorOptions is the paginator options for +// ListWorkerConfigurations +type ListWorkerConfigurationsPaginatorOptions struct { + // The maximum number of worker configurations to list in one response. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListWorkerConfigurationsPaginator is a paginator for ListWorkerConfigurations +type ListWorkerConfigurationsPaginator struct { + options ListWorkerConfigurationsPaginatorOptions + client ListWorkerConfigurationsAPIClient + params *ListWorkerConfigurationsInput + nextToken *string + firstPage bool +} + +// NewListWorkerConfigurationsPaginator returns a new +// ListWorkerConfigurationsPaginator +func NewListWorkerConfigurationsPaginator(client ListWorkerConfigurationsAPIClient, params *ListWorkerConfigurationsInput, optFns ...func(*ListWorkerConfigurationsPaginatorOptions)) *ListWorkerConfigurationsPaginator { + if params == nil { + params = &ListWorkerConfigurationsInput{} + } + + options := ListWorkerConfigurationsPaginatorOptions{} + if params.MaxResults != 0 { + options.Limit = params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListWorkerConfigurationsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListWorkerConfigurationsPaginator) HasMorePages() bool { + return p.firstPage || p.nextToken != nil +} + +// NextPage retrieves the next ListWorkerConfigurations page. +func (p *ListWorkerConfigurationsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListWorkerConfigurationsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + params.MaxResults = p.options.Limit + + result, err := p.client.ListWorkerConfigurations(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListWorkerConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kafkaconnect", + OperationName: "ListWorkerConfigurations", + } +} diff --git a/service/kafkaconnect/api_op_UpdateConnector.go b/service/kafkaconnect/api_op_UpdateConnector.go new file mode 100644 index 00000000000..426fc3d5c7a --- /dev/null +++ b/service/kafkaconnect/api_op_UpdateConnector.go @@ -0,0 +1,134 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kafkaconnect + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates the specified connector. +func (c *Client) UpdateConnector(ctx context.Context, params *UpdateConnectorInput, optFns ...func(*Options)) (*UpdateConnectorOutput, error) { + if params == nil { + params = &UpdateConnectorInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateConnector", params, optFns, c.addOperationUpdateConnectorMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateConnectorOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateConnectorInput struct { + + // The target capacity. + // + // This member is required. 
+ Capacity *types.CapacityUpdate + + // The Amazon Resource Name (ARN) of the connector that you want to update. + // + // This member is required. + ConnectorArn *string + + // The current version of the connector that you want to update. + // + // This member is required. + CurrentVersion *string + + noSmithyDocumentSerde +} + +type UpdateConnectorOutput struct { + + // The Amazon Resource Name (ARN) of the connector. + ConnectorArn *string + + // The state of the connector. + ConnectorState types.ConnectorState + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateConnectorMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpUpdateConnector{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpUpdateConnector{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateConnectorValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateConnector(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateConnector(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kafkaconnect", + OperationName: "UpdateConnector", + } +} diff --git a/service/kafkaconnect/deserializers.go b/service/kafkaconnect/deserializers.go new file mode 100644 index 00000000000..6b68f8a6068 --- /dev/null +++ b/service/kafkaconnect/deserializers.go @@ -0,0 +1,4525 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package kafkaconnect + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "strings" +) + +type awsRestjson1_deserializeOpCreateConnector struct { +} + +func (*awsRestjson1_deserializeOpCreateConnector) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCreateConnector) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCreateConnector(response, &metadata) + } + output := &CreateConnectorOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentCreateConnectorOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorCreateConnector(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return 
awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentCreateConnectorOutput(v **CreateConnectorOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateConnectorOutput + if *v == nil { + sv = &CreateConnectorOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "connectorArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.ConnectorArn = ptr.String(jtv) + } + + case "connectorName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.ConnectorName = ptr.String(jtv) + } + + case "connectorState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ConnectorState to be of type string, got %T instead", value) + } + sv.ConnectorState = types.ConnectorState(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpCreateCustomPlugin struct { +} + +func (*awsRestjson1_deserializeOpCreateCustomPlugin) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCreateCustomPlugin) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCreateCustomPlugin(response, &metadata) + } + output := &CreateCustomPluginOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := 
json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentCreateCustomPluginOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorCreateCustomPlugin(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentCreateCustomPluginOutput(v **CreateCustomPluginOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return 
fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateCustomPluginOutput + if *v == nil { + sv = &CreateCustomPluginOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "customPluginArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.CustomPluginArn = ptr.String(jtv) + } + + case "customPluginState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomPluginState to be of type string, got %T instead", value) + } + sv.CustomPluginState = types.CustomPluginState(jtv) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "revision": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Revision = i64 + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpCreateWorkerConfiguration struct { +} + +func (*awsRestjson1_deserializeOpCreateWorkerConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCreateWorkerConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCreateWorkerConfiguration(response, &metadata) + } + output := &CreateWorkerConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentCreateWorkerConfigurationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorCreateWorkerConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if 
len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentCreateWorkerConfigurationOutput(v **CreateWorkerConfigurationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateWorkerConfigurationOutput + if *v == nil { + sv = &CreateWorkerConfigurationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "creationTime": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __timestampIso8601 to be of type string, got %T instead", value) + } + t, err := smithytime.ParseDateTime(jtv) + if err != nil { + return err + } + sv.CreationTime = ptr.Time(t) + } + + case "latestRevision": + if err := awsRestjson1_deserializeDocumentWorkerConfigurationRevisionSummary(&sv.LatestRevision, value); err != nil { + return err + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "workerConfigurationArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.WorkerConfigurationArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpDeleteConnector 
struct { +} + +func (*awsRestjson1_deserializeOpDeleteConnector) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpDeleteConnector) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorDeleteConnector(response, &metadata) + } + output := &DeleteConnectorOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentDeleteConnectorOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorDeleteConnector(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case 
strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentDeleteConnectorOutput(v **DeleteConnectorOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteConnectorOutput + if *v == nil { + sv = &DeleteConnectorOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "connectorArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.ConnectorArn = ptr.String(jtv) + } + + case "connectorState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ConnectorState to be of type string, got %T instead", value) + } + sv.ConnectorState = types.ConnectorState(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpDescribeConnector struct { +} + +func (*awsRestjson1_deserializeOpDescribeConnector) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpDescribeConnector) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorDescribeConnector(response, &metadata) + } + output := &DescribeConnectorOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentDescribeConnectorOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorDescribeConnector(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer 
bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentDescribeConnectorOutput(v **DescribeConnectorOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeConnectorOutput + if *v == nil { + sv = &DescribeConnectorOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "capacity": + if err := awsRestjson1_deserializeDocumentCapacityDescription(&sv.Capacity, value); err != nil { + return err + } + + case "connectorArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.ConnectorArn = ptr.String(jtv) + } + + case "connectorConfiguration": + if err := awsRestjson1_deserializeDocument__mapOf__string(&sv.ConnectorConfiguration, value); err != nil { + return err + } + + case "connectorDescription": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.ConnectorDescription = ptr.String(jtv) + } + + case "connectorName": + if value != nil { + jtv, ok := value.(string) + if !ok 
{ + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.ConnectorName = ptr.String(jtv) + } + + case "connectorState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ConnectorState to be of type string, got %T instead", value) + } + sv.ConnectorState = types.ConnectorState(jtv) + } + + case "creationTime": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __timestampIso8601 to be of type string, got %T instead", value) + } + t, err := smithytime.ParseDateTime(jtv) + if err != nil { + return err + } + sv.CreationTime = ptr.Time(t) + } + + case "currentVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.CurrentVersion = ptr.String(jtv) + } + + case "kafkaCluster": + if err := awsRestjson1_deserializeDocumentKafkaClusterDescription(&sv.KafkaCluster, value); err != nil { + return err + } + + case "kafkaClusterClientAuthentication": + if err := awsRestjson1_deserializeDocumentKafkaClusterClientAuthenticationDescription(&sv.KafkaClusterClientAuthentication, value); err != nil { + return err + } + + case "kafkaClusterEncryptionInTransit": + if err := awsRestjson1_deserializeDocumentKafkaClusterEncryptionInTransitDescription(&sv.KafkaClusterEncryptionInTransit, value); err != nil { + return err + } + + case "kafkaConnectVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.KafkaConnectVersion = ptr.String(jtv) + } + + case "logDelivery": + if err := awsRestjson1_deserializeDocumentLogDeliveryDescription(&sv.LogDelivery, value); err != nil { + return err + } + + case "plugins": + if err := awsRestjson1_deserializeDocument__listOfPluginDescription(&sv.Plugins, value); err != nil { + return err + } + + case "serviceExecutionRoleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.ServiceExecutionRoleArn = ptr.String(jtv) + } + + case "workerConfiguration": + if err := awsRestjson1_deserializeDocumentWorkerConfigurationDescription(&sv.WorkerConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpDescribeCustomPlugin struct { +} + +func (*awsRestjson1_deserializeOpDescribeCustomPlugin) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpDescribeCustomPlugin) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorDescribeCustomPlugin(response, &metadata) + } + output := &DescribeCustomPluginOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + 
decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentDescribeCustomPluginOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorDescribeCustomPlugin(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentDescribeCustomPluginOutput(v **DescribeCustomPluginOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeCustomPluginOutput + if *v == nil { + sv = &DescribeCustomPluginOutput{} + } else { + sv = *v + } + + 
for key, value := range shape { + switch key { + case "creationTime": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __timestampIso8601 to be of type string, got %T instead", value) + } + t, err := smithytime.ParseDateTime(jtv) + if err != nil { + return err + } + sv.CreationTime = ptr.Time(t) + } + + case "customPluginArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.CustomPluginArn = ptr.String(jtv) + } + + case "customPluginState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomPluginState to be of type string, got %T instead", value) + } + sv.CustomPluginState = types.CustomPluginState(jtv) + } + + case "description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "latestRevision": + if err := awsRestjson1_deserializeDocumentCustomPluginRevisionSummary(&sv.LatestRevision, value); err != nil { + return err + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpDescribeWorkerConfiguration struct { +} + +func (*awsRestjson1_deserializeOpDescribeWorkerConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpDescribeWorkerConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorDescribeWorkerConfiguration(response, &metadata) + } + output := &DescribeWorkerConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentDescribeWorkerConfigurationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorDescribeWorkerConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return 
&smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentDescribeWorkerConfigurationOutput(v **DescribeWorkerConfigurationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeWorkerConfigurationOutput + if *v == nil { + sv = &DescribeWorkerConfigurationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "creationTime": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __timestampIso8601 to be of type string, got %T instead", value) + } + t, err := smithytime.ParseDateTime(jtv) + if err != nil { + return err + } + sv.CreationTime = ptr.Time(t) + } + + case "description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "latestRevision": + if err := awsRestjson1_deserializeDocumentWorkerConfigurationRevisionDescription(&sv.LatestRevision, value); err != nil { + return err + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } 
+ + case "workerConfigurationArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.WorkerConfigurationArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListConnectors struct { +} + +func (*awsRestjson1_deserializeOpListConnectors) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListConnectors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListConnectors(response, &metadata) + } + output := &ListConnectorsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListConnectorsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListConnectors(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return 
awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListConnectorsOutput(v **ListConnectorsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListConnectorsOutput + if *v == nil { + sv = &ListConnectorsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "connectors": + if err := awsRestjson1_deserializeDocument__listOfConnectorSummary(&sv.Connectors, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListCustomPlugins struct { +} + +func (*awsRestjson1_deserializeOpListCustomPlugins) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListCustomPlugins) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListCustomPlugins(response, &metadata) + } + output := &ListCustomPluginsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListCustomPluginsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to 
decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListCustomPlugins(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListCustomPluginsOutput(v **ListCustomPluginsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListCustomPluginsOutput + if *v == nil { + sv = &ListCustomPluginsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "customPlugins": + if err := awsRestjson1_deserializeDocument__listOfCustomPluginSummary(&sv.CustomPlugins, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListWorkerConfigurations struct { +} + +func 
(*awsRestjson1_deserializeOpListWorkerConfigurations) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListWorkerConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListWorkerConfigurations(response, &metadata) + } + output := &ListWorkerConfigurationsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListWorkerConfigurationsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListWorkerConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + 
+ case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListWorkerConfigurationsOutput(v **ListWorkerConfigurationsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListWorkerConfigurationsOutput + if *v == nil { + sv = &ListWorkerConfigurationsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "workerConfigurations": + if err := awsRestjson1_deserializeDocument__listOfWorkerConfigurationSummary(&sv.WorkerConfigurations, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpUpdateConnector struct { +} + +func (*awsRestjson1_deserializeOpUpdateConnector) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpUpdateConnector) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorUpdateConnector(response, &metadata) + } + output := &UpdateConnectorOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentUpdateConnectorOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorUpdateConnector(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := 
io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceUnavailableException", errorCode): + return awsRestjson1_deserializeErrorServiceUnavailableException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentUpdateConnectorOutput(v **UpdateConnectorOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateConnectorOutput + if *v == nil { + sv = &UpdateConnectorOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "connectorArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.ConnectorArn = ptr.String(jtv) + } + + case "connectorState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ConnectorState to be of type string, got %T instead", value) + } + sv.ConnectorState = types.ConnectorState(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeErrorBadRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.BadRequestException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var 
shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentBadRequestException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorConflictException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ConflictException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentConflictException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorForbiddenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ForbiddenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentForbiddenException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInternalServerErrorException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InternalServerErrorException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInternalServerErrorException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.NotFoundException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorServiceUnavailableException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ServiceUnavailableException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentServiceUnavailableException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorTooManyRequestsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.TooManyRequestsException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentTooManyRequestsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnauthorizedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnauthorizedException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + 
decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnauthorizedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeDocument__listOf__string(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocument__listOfConnectorSummary(v *[]types.ConnectorSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ConnectorSummary + if *v == nil { + cv = []types.ConnectorSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ConnectorSummary + destAddr := &col + if err := awsRestjson1_deserializeDocumentConnectorSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocument__listOfCustomPluginSummary(v *[]types.CustomPluginSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.CustomPluginSummary + if *v == nil { + cv = []types.CustomPluginSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.CustomPluginSummary + destAddr := &col + if err := awsRestjson1_deserializeDocumentCustomPluginSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocument__listOfPluginDescription(v *[]types.PluginDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.PluginDescription + if *v == nil { + cv = []types.PluginDescription{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.PluginDescription + destAddr := &col + if err := awsRestjson1_deserializeDocumentPluginDescription(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func 
awsRestjson1_deserializeDocument__listOfWorkerConfigurationSummary(v *[]types.WorkerConfigurationSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.WorkerConfigurationSummary + if *v == nil { + cv = []types.WorkerConfigurationSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.WorkerConfigurationSummary + destAddr := &col + if err := awsRestjson1_deserializeDocumentWorkerConfigurationSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocument__mapOf__string(v *map[string]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]string + if *v == nil { + mv = map[string]string{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + parsedVal = jtv + } + mv[key] = parsedVal + + } + *v = mv + return nil +} + +func awsRestjson1_deserializeDocumentApacheKafkaClusterDescription(v **types.ApacheKafkaClusterDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ApacheKafkaClusterDescription + if *v == nil { + sv = &types.ApacheKafkaClusterDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "bootstrapServers": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.BootstrapServers = ptr.String(jtv) + } + + case "vpc": + if err := awsRestjson1_deserializeDocumentVpcDescription(&sv.Vpc, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentAutoScalingDescription(v **types.AutoScalingDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AutoScalingDescription + if *v == nil { + sv = &types.AutoScalingDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "maxWorkerCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MaxWorkerCount = int32(i64) + } + + case "mcuCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.McuCount = int32(i64) + } + + case "minWorkerCount": + if value != nil { + 
jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MinWorkerCount = int32(i64) + } + + case "scaleInPolicy": + if err := awsRestjson1_deserializeDocumentScaleInPolicyDescription(&sv.ScaleInPolicy, value); err != nil { + return err + } + + case "scaleOutPolicy": + if err := awsRestjson1_deserializeDocumentScaleOutPolicyDescription(&sv.ScaleOutPolicy, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentBadRequestException(v **types.BadRequestException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.BadRequestException + if *v == nil { + sv = &types.BadRequestException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentCapacityDescription(v **types.CapacityDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CapacityDescription + if *v == nil { + sv = &types.CapacityDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "autoScaling": + if err := awsRestjson1_deserializeDocumentAutoScalingDescription(&sv.AutoScaling, value); err != nil { + return err + } + + case "provisionedCapacity": + if err := awsRestjson1_deserializeDocumentProvisionedCapacityDescription(&sv.ProvisionedCapacity, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentCloudWatchLogsLogDeliveryDescription(v **types.CloudWatchLogsLogDeliveryDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CloudWatchLogsLogDeliveryDescription + if *v == nil { + sv = &types.CloudWatchLogsLogDeliveryDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "enabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected __boolean to be of type *bool, got %T instead", value) + } + sv.Enabled = jtv + } + + case "logGroup": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.LogGroup = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentConflictException(v **types.ConflictException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok 
:= value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ConflictException + if *v == nil { + sv = &types.ConflictException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentConnectorSummary(v **types.ConnectorSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ConnectorSummary + if *v == nil { + sv = &types.ConnectorSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "capacity": + if err := awsRestjson1_deserializeDocumentCapacityDescription(&sv.Capacity, value); err != nil { + return err + } + + case "connectorArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.ConnectorArn = ptr.String(jtv) + } + + case "connectorDescription": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.ConnectorDescription = ptr.String(jtv) + } + + case "connectorName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.ConnectorName = ptr.String(jtv) + } + + case "connectorState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ConnectorState to be of type string, got %T instead", value) + } + sv.ConnectorState = types.ConnectorState(jtv) + } + + case "creationTime": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __timestampIso8601 to be of type string, got %T instead", value) + } + t, err := smithytime.ParseDateTime(jtv) + if err != nil { + return err + } + sv.CreationTime = ptr.Time(t) + } + + case "currentVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.CurrentVersion = ptr.String(jtv) + } + + case "kafkaCluster": + if err := awsRestjson1_deserializeDocumentKafkaClusterDescription(&sv.KafkaCluster, value); err != nil { + return err + } + + case "kafkaClusterClientAuthentication": + if err := awsRestjson1_deserializeDocumentKafkaClusterClientAuthenticationDescription(&sv.KafkaClusterClientAuthentication, value); err != nil { + return err + } + + case "kafkaClusterEncryptionInTransit": + if err := awsRestjson1_deserializeDocumentKafkaClusterEncryptionInTransitDescription(&sv.KafkaClusterEncryptionInTransit, value); err != nil { + return err + } + + case "kafkaConnectVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.KafkaConnectVersion = ptr.String(jtv) + } + + case "logDelivery": + if err := awsRestjson1_deserializeDocumentLogDeliveryDescription(&sv.LogDelivery, value); err != nil { + return err + } + + case "plugins": + if err := 
awsRestjson1_deserializeDocument__listOfPluginDescription(&sv.Plugins, value); err != nil { + return err + } + + case "serviceExecutionRoleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.ServiceExecutionRoleArn = ptr.String(jtv) + } + + case "workerConfiguration": + if err := awsRestjson1_deserializeDocumentWorkerConfigurationDescription(&sv.WorkerConfiguration, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentCustomPluginDescription(v **types.CustomPluginDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CustomPluginDescription + if *v == nil { + sv = &types.CustomPluginDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "customPluginArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.CustomPluginArn = ptr.String(jtv) + } + + case "revision": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Revision = i64 + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentCustomPluginFileDescription(v **types.CustomPluginFileDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CustomPluginFileDescription + if *v == nil { + sv = &types.CustomPluginFileDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "fileMd5": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.FileMd5 = ptr.String(jtv) + } + + case "fileSize": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.FileSize = i64 + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentCustomPluginLocationDescription(v **types.CustomPluginLocationDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CustomPluginLocationDescription + if *v == nil { + sv = &types.CustomPluginLocationDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "s3Location": + if err := awsRestjson1_deserializeDocumentS3LocationDescription(&sv.S3Location, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsRestjson1_deserializeDocumentCustomPluginRevisionSummary(v **types.CustomPluginRevisionSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CustomPluginRevisionSummary + if *v == nil { + sv = &types.CustomPluginRevisionSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "contentType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomPluginContentType to be of type string, got %T instead", value) + } + sv.ContentType = types.CustomPluginContentType(jtv) + } + + case "creationTime": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __timestampIso8601 to be of type string, got %T instead", value) + } + t, err := smithytime.ParseDateTime(jtv) + if err != nil { + return err + } + sv.CreationTime = ptr.Time(t) + } + + case "description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "fileDescription": + if err := awsRestjson1_deserializeDocumentCustomPluginFileDescription(&sv.FileDescription, value); err != nil { + return err + } + + case "location": + if err := awsRestjson1_deserializeDocumentCustomPluginLocationDescription(&sv.Location, value); err != nil { + return err + } + + case "revision": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Revision = i64 + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentCustomPluginSummary(v **types.CustomPluginSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CustomPluginSummary + if *v == nil { + sv = &types.CustomPluginSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "creationTime": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __timestampIso8601 to be of type string, got %T instead", value) + } + t, err := smithytime.ParseDateTime(jtv) + if err != nil { + return err + } + sv.CreationTime = ptr.Time(t) + } + + case "customPluginArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.CustomPluginArn = ptr.String(jtv) + } + + case "customPluginState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CustomPluginState to be of type string, got %T instead", value) + } + sv.CustomPluginState = types.CustomPluginState(jtv) + } + + case "description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "latestRevision": + if err := awsRestjson1_deserializeDocumentCustomPluginRevisionSummary(&sv.LatestRevision, value); err != nil { + return err + } + 
+ case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentFirehoseLogDeliveryDescription(v **types.FirehoseLogDeliveryDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.FirehoseLogDeliveryDescription + if *v == nil { + sv = &types.FirehoseLogDeliveryDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "deliveryStream": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.DeliveryStream = ptr.String(jtv) + } + + case "enabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected __boolean to be of type *bool, got %T instead", value) + } + sv.Enabled = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentForbiddenException(v **types.ForbiddenException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ForbiddenException + if *v == nil { + sv = &types.ForbiddenException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInternalServerErrorException(v **types.InternalServerErrorException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InternalServerErrorException + if *v == nil { + sv = &types.InternalServerErrorException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentKafkaClusterClientAuthenticationDescription(v **types.KafkaClusterClientAuthenticationDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KafkaClusterClientAuthenticationDescription + if *v == nil { + sv = &types.KafkaClusterClientAuthenticationDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "authenticationType": + if value != nil { + jtv, ok := 
value.(string) + if !ok { + return fmt.Errorf("expected KafkaClusterClientAuthenticationType to be of type string, got %T instead", value) + } + sv.AuthenticationType = types.KafkaClusterClientAuthenticationType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentKafkaClusterDescription(v **types.KafkaClusterDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KafkaClusterDescription + if *v == nil { + sv = &types.KafkaClusterDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "apacheKafkaCluster": + if err := awsRestjson1_deserializeDocumentApacheKafkaClusterDescription(&sv.ApacheKafkaCluster, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentKafkaClusterEncryptionInTransitDescription(v **types.KafkaClusterEncryptionInTransitDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KafkaClusterEncryptionInTransitDescription + if *v == nil { + sv = &types.KafkaClusterEncryptionInTransitDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "encryptionType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KafkaClusterEncryptionInTransitType to be of type string, got %T instead", value) + } + sv.EncryptionType = types.KafkaClusterEncryptionInTransitType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentLogDeliveryDescription(v **types.LogDeliveryDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LogDeliveryDescription + if *v == nil { + sv = &types.LogDeliveryDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "workerLogDelivery": + if err := awsRestjson1_deserializeDocumentWorkerLogDeliveryDescription(&sv.WorkerLogDelivery, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentNotFoundException(v **types.NotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NotFoundException + if *v == nil { + sv = &types.NotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsRestjson1_deserializeDocumentPluginDescription(v **types.PluginDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PluginDescription + if *v == nil { + sv = &types.PluginDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "customPlugin": + if err := awsRestjson1_deserializeDocumentCustomPluginDescription(&sv.CustomPlugin, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentProvisionedCapacityDescription(v **types.ProvisionedCapacityDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ProvisionedCapacityDescription + if *v == nil { + sv = &types.ProvisionedCapacityDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "mcuCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.McuCount = int32(i64) + } + + case "workerCount": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.WorkerCount = int32(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentS3LocationDescription(v **types.S3LocationDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.S3LocationDescription + if *v == nil { + sv = &types.S3LocationDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "bucketArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.BucketArn = ptr.String(jtv) + } + + case "fileKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.FileKey = ptr.String(jtv) + } + + case "objectVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.ObjectVersion = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentS3LogDeliveryDescription(v **types.S3LogDeliveryDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.S3LogDeliveryDescription + if *v == nil { + sv = &types.S3LogDeliveryDescription{} + } else { + sv = *v 
+ } + + for key, value := range shape { + switch key { + case "bucket": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Bucket = ptr.String(jtv) + } + + case "enabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected __boolean to be of type *bool, got %T instead", value) + } + sv.Enabled = jtv + } + + case "prefix": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Prefix = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentScaleInPolicyDescription(v **types.ScaleInPolicyDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ScaleInPolicyDescription + if *v == nil { + sv = &types.ScaleInPolicyDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "cpuUtilizationPercentage": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.CpuUtilizationPercentage = int32(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentScaleOutPolicyDescription(v **types.ScaleOutPolicyDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ScaleOutPolicyDescription + if *v == nil { + sv = &types.ScaleOutPolicyDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "cpuUtilizationPercentage": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.CpuUtilizationPercentage = int32(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentServiceUnavailableException(v **types.ServiceUnavailableException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ServiceUnavailableException + if *v == nil { + sv = &types.ServiceUnavailableException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentTooManyRequestsException(v **types.TooManyRequestsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } 
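// Deserialization here is deliberately tolerant: a nil JSON value leaves the target untouched,
// a non-object payload is rejected, recognized members are copied onto the (possibly pre-populated)
// target struct, and unrecognized members fall through to the default case and are ignored.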
+ + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TooManyRequestsException + if *v == nil { + sv = &types.TooManyRequestsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentUnauthorizedException(v **types.UnauthorizedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnauthorizedException + if *v == nil { + sv = &types.UnauthorizedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentVpcDescription(v **types.VpcDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.VpcDescription + if *v == nil { + sv = &types.VpcDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "securityGroups": + if err := awsRestjson1_deserializeDocument__listOf__string(&sv.SecurityGroups, value); err != nil { + return err + } + + case "subnets": + if err := awsRestjson1_deserializeDocument__listOf__string(&sv.Subnets, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentWorkerConfigurationDescription(v **types.WorkerConfigurationDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.WorkerConfigurationDescription + if *v == nil { + sv = &types.WorkerConfigurationDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "revision": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Revision = i64 + } + + case "workerConfigurationArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.WorkerConfigurationArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentWorkerConfigurationRevisionDescription(v **types.WorkerConfigurationRevisionDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return 
nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.WorkerConfigurationRevisionDescription + if *v == nil { + sv = &types.WorkerConfigurationRevisionDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "creationTime": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __timestampIso8601 to be of type string, got %T instead", value) + } + t, err := smithytime.ParseDateTime(jtv) + if err != nil { + return err + } + sv.CreationTime = ptr.Time(t) + } + + case "description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "propertiesFileContent": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.PropertiesFileContent = ptr.String(jtv) + } + + case "revision": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Revision = i64 + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentWorkerConfigurationRevisionSummary(v **types.WorkerConfigurationRevisionSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.WorkerConfigurationRevisionSummary + if *v == nil { + sv = &types.WorkerConfigurationRevisionSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "creationTime": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __timestampIso8601 to be of type string, got %T instead", value) + } + t, err := smithytime.ParseDateTime(jtv) + if err != nil { + return err + } + sv.CreationTime = ptr.Time(t) + } + + case "description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "revision": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Revision = i64 + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentWorkerConfigurationSummary(v **types.WorkerConfigurationSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.WorkerConfigurationSummary + if *v == nil { + sv = &types.WorkerConfigurationSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "creationTime": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __timestampIso8601 to be of type string, got %T instead", value) + } + t, err := smithytime.ParseDateTime(jtv) + if 
err != nil { + return err + } + sv.CreationTime = ptr.Time(t) + } + + case "description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "latestRevision": + if err := awsRestjson1_deserializeDocumentWorkerConfigurationRevisionSummary(&sv.LatestRevision, value); err != nil { + return err + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "workerConfigurationArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.WorkerConfigurationArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentWorkerLogDeliveryDescription(v **types.WorkerLogDeliveryDescription, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.WorkerLogDeliveryDescription + if *v == nil { + sv = &types.WorkerLogDeliveryDescription{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "cloudWatchLogs": + if err := awsRestjson1_deserializeDocumentCloudWatchLogsLogDeliveryDescription(&sv.CloudWatchLogs, value); err != nil { + return err + } + + case "firehose": + if err := awsRestjson1_deserializeDocumentFirehoseLogDeliveryDescription(&sv.Firehose, value); err != nil { + return err + } + + case "s3": + if err := awsRestjson1_deserializeDocumentS3LogDeliveryDescription(&sv.S3, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} diff --git a/service/kafkaconnect/doc.go b/service/kafkaconnect/doc.go new file mode 100644 index 00000000000..1b3764661f8 --- /dev/null +++ b/service/kafkaconnect/doc.go @@ -0,0 +1,7 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package kafkaconnect provides the API client, operations, and parameter types +// for Managed Streaming for Kafka Connect. +// +// +package kafkaconnect diff --git a/service/kafkaconnect/endpoints.go b/service/kafkaconnect/endpoints.go new file mode 100644 index 00000000000..421c96d6d54 --- /dev/null +++ b/service/kafkaconnect/endpoints.go @@ -0,0 +1,160 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kafkaconnect + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/internal/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/url" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. 
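// Illustrative usage sketch, not generated code: endpoint resolution for this client can be
// overridden per client with the EndpointResolverFromURL helper defined in this file. The
// config import and the NewFromConfig constructor follow the standard aws-sdk-go-v2 pattern
// and are assumed here rather than shown in this diff; the endpoint URL is hypothetical.
//
//	cfg, err := config.LoadDefaultConfig(context.TODO())
//	if err != nil {
//		// handle configuration error
//	}
//	client := kafkaconnect.NewFromConfig(cfg, func(o *kafkaconnect.Options) {
//		// Route requests to a custom endpoint (for example, a VPC endpoint).
//		o.EndpointResolver = kafkaconnect.EndpointResolverFromURL("https://example-endpoint.kafkaconnect.us-east-1.amazonaws.com")
//	})
//	out, err := client.ListConnectors(context.TODO(), &kafkaconnect.ListConnectorsInput{MaxResults: 10})
//	_ = out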
+type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +func resolveDefaultEndpointConfiguration(o *Options) { + if o.EndpointResolver != nil { + return + } + o.EndpointResolver = NewDefaultEndpointResolver() +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom.You can provide functional options to configure endpoint +// values for the resolved endpoint. +func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), m.Options) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "kafkaconnect" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func 
removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolver + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region) + if err == nil { + return endpoint, nil + } + + if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint +// resolution to the awsResolver. If awsResolver returns aws.EndpointNotFoundError +// error, the resolver will use the the provided fallbackResolver for resolution. +// awsResolver and fallbackResolver must not be nil +func withEndpointResolver(awsResolver aws.EndpointResolver, fallbackResolver EndpointResolver) EndpointResolver { + return &wrappedEndpointResolver{ + awsResolver: awsResolver, + resolver: fallbackResolver, + } +} diff --git a/service/kafkaconnect/generated.json b/service/kafkaconnect/generated.json new file mode 100644 index 00000000000..f69df3fa947 --- /dev/null +++ b/service/kafkaconnect/generated.json @@ -0,0 +1,35 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_op_CreateConnector.go", + "api_op_CreateCustomPlugin.go", + "api_op_CreateWorkerConfiguration.go", + "api_op_DeleteConnector.go", + "api_op_DescribeConnector.go", + "api_op_DescribeCustomPlugin.go", + "api_op_DescribeWorkerConfiguration.go", + "api_op_ListConnectors.go", + "api_op_ListCustomPlugins.go", + "api_op_ListWorkerConfigurations.go", + "api_op_UpdateConnector.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/enums.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/kafkaconnect", + "unstable": false +} diff --git a/service/kafkaconnect/go.mod b/service/kafkaconnect/go.mod new file mode 100644 index 00000000000..5bf100a0b2a --- /dev/null +++ b/service/kafkaconnect/go.mod @@ -0,0 +1,10 @@ +module github.com/aws/aws-sdk-go-v2/service/kafkaconnect + +go 1.15 + +require ( + github.com/aws/aws-sdk-go-v2 v1.9.0 + github.com/aws/smithy-go v1.8.0 +) + +replace github.com/aws/aws-sdk-go-v2 => ../../ diff --git a/service/kafkaconnect/go.sum b/service/kafkaconnect/go.sum new file mode 100644 index 00000000000..779915f8ab7 --- /dev/null +++ b/service/kafkaconnect/go.sum @@ -0,0 +1,14 @@ +github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/service/kafkaconnect/go_module_metadata.go b/service/kafkaconnect/go_module_metadata.go new file mode 100644 index 00000000000..b00fdd1c7a9 --- /dev/null +++ b/service/kafkaconnect/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package kafkaconnect + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "tip" diff --git a/service/kafkaconnect/internal/endpoints/endpoints.go b/service/kafkaconnect/internal/endpoints/endpoints.go new file mode 100644 index 00000000000..3fff27576f1 --- /dev/null +++ b/service/kafkaconnect/internal/endpoints/endpoints.go @@ -0,0 +1,106 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/endpoints" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + DisableHTTPS bool +} + +// Resolver KafkaConnect endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := endpoints.Options{ + DisableHTTPS: options.DisableHTTPS, + } + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: endpoints.Endpoint{ + Hostname: "kafkaconnect.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + }, + { + ID: "aws-cn", + Defaults: endpoints.Endpoint{ + Hostname: "kafkaconnect.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + }, + { + ID: "aws-iso", + Defaults: endpoints.Endpoint{ + Hostname: "kafkaconnect.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, 
+ }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + }, + { + ID: "aws-iso-b", + Defaults: endpoints.Endpoint{ + Hostname: "kafkaconnect.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: endpoints.Endpoint{ + Hostname: "kafkaconnect.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + }, +} diff --git a/service/kafkaconnect/internal/endpoints/endpoints_test.go b/service/kafkaconnect/internal/endpoints/endpoints_test.go new file mode 100644 index 00000000000..08e5da2d833 --- /dev/null +++ b/service/kafkaconnect/internal/endpoints/endpoints_test.go @@ -0,0 +1,11 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "testing" +) + +func TestRegexCompile(t *testing.T) { + _ = defaultPartitions +} diff --git a/service/kafkaconnect/protocol_test.go b/service/kafkaconnect/protocol_test.go new file mode 100644 index 00000000000..ffa5ce760ed --- /dev/null +++ b/service/kafkaconnect/protocol_test.go @@ -0,0 +1,3 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kafkaconnect diff --git a/service/kafkaconnect/serializers.go b/service/kafkaconnect/serializers.go new file mode 100644 index 00000000000..69a623030e6 --- /dev/null +++ b/service/kafkaconnect/serializers.go @@ -0,0 +1,1306 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kafkaconnect + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type awsRestjson1_serializeOpCreateConnector struct { +} + +func (*awsRestjson1_serializeOpCreateConnector) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateConnector) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateConnectorInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v1/connectors") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateConnectorInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateConnectorInput(v *CreateConnectorInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateConnectorInput(v *CreateConnectorInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Capacity != nil { + ok := object.Key("capacity") + if err := awsRestjson1_serializeDocumentCapacity(v.Capacity, ok); err != nil { + return err + } + } + + if v.ConnectorConfiguration != nil { + ok := object.Key("connectorConfiguration") + if err := awsRestjson1_serializeDocument__mapOf__string(v.ConnectorConfiguration, ok); err != nil { + return err + } + } + + if v.ConnectorDescription != nil { + ok := object.Key("connectorDescription") + ok.String(*v.ConnectorDescription) + } + + if v.ConnectorName != nil { + ok := object.Key("connectorName") + ok.String(*v.ConnectorName) + } + + if v.KafkaCluster != nil { + ok := object.Key("kafkaCluster") + if err := awsRestjson1_serializeDocumentKafkaCluster(v.KafkaCluster, ok); err != nil { + return err + } + } + + if v.KafkaClusterClientAuthentication != nil { + ok := object.Key("kafkaClusterClientAuthentication") + if err := awsRestjson1_serializeDocumentKafkaClusterClientAuthentication(v.KafkaClusterClientAuthentication, ok); err != nil { + return err + } + } + + if v.KafkaClusterEncryptionInTransit != nil { + ok := object.Key("kafkaClusterEncryptionInTransit") + if err := awsRestjson1_serializeDocumentKafkaClusterEncryptionInTransit(v.KafkaClusterEncryptionInTransit, ok); err != nil { + return err + } + } + + if v.KafkaConnectVersion != nil { + ok := object.Key("kafkaConnectVersion") + ok.String(*v.KafkaConnectVersion) + } + + if v.LogDelivery != nil { + ok := object.Key("logDelivery") + if err := awsRestjson1_serializeDocumentLogDelivery(v.LogDelivery, ok); err != nil { + return err + } + } + + if v.Plugins != nil { + ok := object.Key("plugins") + if err := awsRestjson1_serializeDocument__listOfPlugin(v.Plugins, ok); err != nil { + return err + } + } + + if v.ServiceExecutionRoleArn != nil { + ok := object.Key("serviceExecutionRoleArn") + ok.String(*v.ServiceExecutionRoleArn) + } + + if v.WorkerConfiguration != nil { + ok := object.Key("workerConfiguration") + if err := awsRestjson1_serializeDocumentWorkerConfiguration(v.WorkerConfiguration, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpCreateCustomPlugin struct { +} + +func (*awsRestjson1_serializeOpCreateCustomPlugin) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateCustomPlugin) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateCustomPluginInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := 
httpbinding.SplitURI("/v1/custom-plugins") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateCustomPluginInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateCustomPluginInput(v *CreateCustomPluginInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateCustomPluginInput(v *CreateCustomPluginInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.ContentType) > 0 { + ok := object.Key("contentType") + ok.String(string(v.ContentType)) + } + + if v.Description != nil { + ok := object.Key("description") + ok.String(*v.Description) + } + + if v.Location != nil { + ok := object.Key("location") + if err := awsRestjson1_serializeDocumentCustomPluginLocation(v.Location, ok); err != nil { + return err + } + } + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + return nil +} + +type awsRestjson1_serializeOpCreateWorkerConfiguration struct { +} + +func (*awsRestjson1_serializeOpCreateWorkerConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateWorkerConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateWorkerConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v1/worker-configurations") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateWorkerConfigurationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + 
return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateWorkerConfigurationInput(v *CreateWorkerConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateWorkerConfigurationInput(v *CreateWorkerConfigurationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Description != nil { + ok := object.Key("description") + ok.String(*v.Description) + } + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + if v.PropertiesFileContent != nil { + ok := object.Key("propertiesFileContent") + ok.String(*v.PropertiesFileContent) + } + + return nil +} + +type awsRestjson1_serializeOpDeleteConnector struct { +} + +func (*awsRestjson1_serializeOpDeleteConnector) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpDeleteConnector) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteConnectorInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v1/connectors/{connectorArn}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsDeleteConnectorInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsDeleteConnectorInput(v *DeleteConnectorInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ConnectorArn == nil || len(*v.ConnectorArn) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member connectorArn must not be empty")} + } + if v.ConnectorArn != nil { + if err := encoder.SetURI("connectorArn").String(*v.ConnectorArn); err != nil { + return err + } + } + + if v.CurrentVersion != nil { + encoder.SetQuery("currentVersion").String(*v.CurrentVersion) + } + + return nil +} + +type awsRestjson1_serializeOpDescribeConnector struct { +} + +func (*awsRestjson1_serializeOpDescribeConnector) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpDescribeConnector) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( 
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeConnectorInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v1/connectors/{connectorArn}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsDescribeConnectorInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsDescribeConnectorInput(v *DescribeConnectorInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ConnectorArn == nil || len(*v.ConnectorArn) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member connectorArn must not be empty")} + } + if v.ConnectorArn != nil { + if err := encoder.SetURI("connectorArn").String(*v.ConnectorArn); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpDescribeCustomPlugin struct { +} + +func (*awsRestjson1_serializeOpDescribeCustomPlugin) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpDescribeCustomPlugin) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeCustomPluginInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v1/custom-plugins/{customPluginArn}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsDescribeCustomPluginInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsDescribeCustomPluginInput(v *DescribeCustomPluginInput, encoder *httpbinding.Encoder) error { + 
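// The customPluginArn path (httpLabel) member is validated before URI binding below:
// a nil or empty value is rejected as a SerializationError rather than producing a
// malformed request path.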
if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.CustomPluginArn == nil || len(*v.CustomPluginArn) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member customPluginArn must not be empty")} + } + if v.CustomPluginArn != nil { + if err := encoder.SetURI("customPluginArn").String(*v.CustomPluginArn); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpDescribeWorkerConfiguration struct { +} + +func (*awsRestjson1_serializeOpDescribeWorkerConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpDescribeWorkerConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeWorkerConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v1/worker-configurations/{workerConfigurationArn}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsDescribeWorkerConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsDescribeWorkerConfigurationInput(v *DescribeWorkerConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.WorkerConfigurationArn == nil || len(*v.WorkerConfigurationArn) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member workerConfigurationArn must not be empty")} + } + if v.WorkerConfigurationArn != nil { + if err := encoder.SetURI("workerConfigurationArn").String(*v.WorkerConfigurationArn); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpListConnectors struct { +} + +func (*awsRestjson1_serializeOpListConnectors) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListConnectors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListConnectorsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v1/connectors") + request.URL.Path = 
smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListConnectorsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListConnectorsInput(v *ListConnectorsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ConnectorNamePrefix != nil { + encoder.SetQuery("connectorNamePrefix").String(*v.ConnectorNamePrefix) + } + + if v.MaxResults != 0 { + encoder.SetQuery("maxResults").Integer(v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("nextToken").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpListCustomPlugins struct { +} + +func (*awsRestjson1_serializeOpListCustomPlugins) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListCustomPlugins) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListCustomPluginsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v1/custom-plugins") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListCustomPluginsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListCustomPluginsInput(v *ListCustomPluginsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.MaxResults != 0 { + encoder.SetQuery("maxResults").Integer(v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("nextToken").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpListWorkerConfigurations struct { +} + +func (*awsRestjson1_serializeOpListWorkerConfigurations) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListWorkerConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + 
out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListWorkerConfigurationsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v1/worker-configurations") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListWorkerConfigurationsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListWorkerConfigurationsInput(v *ListWorkerConfigurationsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.MaxResults != 0 { + encoder.SetQuery("maxResults").Integer(v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("nextToken").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpUpdateConnector struct { +} + +func (*awsRestjson1_serializeOpUpdateConnector) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpUpdateConnector) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateConnectorInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v1/connectors/{connectorArn}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsUpdateConnectorInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentUpdateConnectorInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = 
restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsUpdateConnectorInput(v *UpdateConnectorInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ConnectorArn == nil || len(*v.ConnectorArn) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member connectorArn must not be empty")} + } + if v.ConnectorArn != nil { + if err := encoder.SetURI("connectorArn").String(*v.ConnectorArn); err != nil { + return err + } + } + + if v.CurrentVersion != nil { + encoder.SetQuery("currentVersion").String(*v.CurrentVersion) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentUpdateConnectorInput(v *UpdateConnectorInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Capacity != nil { + ok := object.Key("capacity") + if err := awsRestjson1_serializeDocumentCapacityUpdate(v.Capacity, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocument__listOf__string(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsRestjson1_serializeDocument__listOfPlugin(v []types.Plugin, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentPlugin(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsRestjson1_serializeDocument__mapOf__string(v map[string]string, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + om.String(v[key]) + } + return nil +} + +func awsRestjson1_serializeDocumentApacheKafkaCluster(v *types.ApacheKafkaCluster, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BootstrapServers != nil { + ok := object.Key("bootstrapServers") + ok.String(*v.BootstrapServers) + } + + if v.Vpc != nil { + ok := object.Key("vpc") + if err := awsRestjson1_serializeDocumentVpc(v.Vpc, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentAutoScaling(v *types.AutoScaling, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("maxWorkerCount") + ok.Integer(v.MaxWorkerCount) + } + + { + ok := object.Key("mcuCount") + ok.Integer(v.McuCount) + } + + { + ok := object.Key("minWorkerCount") + ok.Integer(v.MinWorkerCount) + } + + if v.ScaleInPolicy != nil { + ok := object.Key("scaleInPolicy") + if err := awsRestjson1_serializeDocumentScaleInPolicy(v.ScaleInPolicy, ok); err != nil { + return err + } + } + + if v.ScaleOutPolicy != nil { + ok := object.Key("scaleOutPolicy") + if err := awsRestjson1_serializeDocumentScaleOutPolicy(v.ScaleOutPolicy, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentAutoScalingUpdate(v *types.AutoScalingUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("maxWorkerCount") + ok.Integer(v.MaxWorkerCount) + } + + { + ok := object.Key("mcuCount") + ok.Integer(v.McuCount) + } + + { + ok := object.Key("minWorkerCount") + ok.Integer(v.MinWorkerCount) + } + + if v.ScaleInPolicy != nil { + 
ok := object.Key("scaleInPolicy") + if err := awsRestjson1_serializeDocumentScaleInPolicyUpdate(v.ScaleInPolicy, ok); err != nil { + return err + } + } + + if v.ScaleOutPolicy != nil { + ok := object.Key("scaleOutPolicy") + if err := awsRestjson1_serializeDocumentScaleOutPolicyUpdate(v.ScaleOutPolicy, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentCapacity(v *types.Capacity, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AutoScaling != nil { + ok := object.Key("autoScaling") + if err := awsRestjson1_serializeDocumentAutoScaling(v.AutoScaling, ok); err != nil { + return err + } + } + + if v.ProvisionedCapacity != nil { + ok := object.Key("provisionedCapacity") + if err := awsRestjson1_serializeDocumentProvisionedCapacity(v.ProvisionedCapacity, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentCapacityUpdate(v *types.CapacityUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AutoScaling != nil { + ok := object.Key("autoScaling") + if err := awsRestjson1_serializeDocumentAutoScalingUpdate(v.AutoScaling, ok); err != nil { + return err + } + } + + if v.ProvisionedCapacity != nil { + ok := object.Key("provisionedCapacity") + if err := awsRestjson1_serializeDocumentProvisionedCapacityUpdate(v.ProvisionedCapacity, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentCloudWatchLogsLogDelivery(v *types.CloudWatchLogsLogDelivery, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("enabled") + ok.Boolean(v.Enabled) + } + + if v.LogGroup != nil { + ok := object.Key("logGroup") + ok.String(*v.LogGroup) + } + + return nil +} + +func awsRestjson1_serializeDocumentCustomPlugin(v *types.CustomPlugin, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomPluginArn != nil { + ok := object.Key("customPluginArn") + ok.String(*v.CustomPluginArn) + } + + { + ok := object.Key("revision") + ok.Long(v.Revision) + } + + return nil +} + +func awsRestjson1_serializeDocumentCustomPluginLocation(v *types.CustomPluginLocation, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.S3Location != nil { + ok := object.Key("s3Location") + if err := awsRestjson1_serializeDocumentS3Location(v.S3Location, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentFirehoseLogDelivery(v *types.FirehoseLogDelivery, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DeliveryStream != nil { + ok := object.Key("deliveryStream") + ok.String(*v.DeliveryStream) + } + + { + ok := object.Key("enabled") + ok.Boolean(v.Enabled) + } + + return nil +} + +func awsRestjson1_serializeDocumentKafkaCluster(v *types.KafkaCluster, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ApacheKafkaCluster != nil { + ok := object.Key("apacheKafkaCluster") + if err := awsRestjson1_serializeDocumentApacheKafkaCluster(v.ApacheKafkaCluster, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentKafkaClusterClientAuthentication(v *types.KafkaClusterClientAuthentication, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.AuthenticationType) > 0 { + ok := object.Key("authenticationType") + 
ok.String(string(v.AuthenticationType)) + } + + return nil +} + +func awsRestjson1_serializeDocumentKafkaClusterEncryptionInTransit(v *types.KafkaClusterEncryptionInTransit, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.EncryptionType) > 0 { + ok := object.Key("encryptionType") + ok.String(string(v.EncryptionType)) + } + + return nil +} + +func awsRestjson1_serializeDocumentLogDelivery(v *types.LogDelivery, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.WorkerLogDelivery != nil { + ok := object.Key("workerLogDelivery") + if err := awsRestjson1_serializeDocumentWorkerLogDelivery(v.WorkerLogDelivery, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentPlugin(v *types.Plugin, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomPlugin != nil { + ok := object.Key("customPlugin") + if err := awsRestjson1_serializeDocumentCustomPlugin(v.CustomPlugin, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentProvisionedCapacity(v *types.ProvisionedCapacity, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("mcuCount") + ok.Integer(v.McuCount) + } + + { + ok := object.Key("workerCount") + ok.Integer(v.WorkerCount) + } + + return nil +} + +func awsRestjson1_serializeDocumentProvisionedCapacityUpdate(v *types.ProvisionedCapacityUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("mcuCount") + ok.Integer(v.McuCount) + } + + { + ok := object.Key("workerCount") + ok.Integer(v.WorkerCount) + } + + return nil +} + +func awsRestjson1_serializeDocumentS3Location(v *types.S3Location, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BucketArn != nil { + ok := object.Key("bucketArn") + ok.String(*v.BucketArn) + } + + if v.FileKey != nil { + ok := object.Key("fileKey") + ok.String(*v.FileKey) + } + + if v.ObjectVersion != nil { + ok := object.Key("objectVersion") + ok.String(*v.ObjectVersion) + } + + return nil +} + +func awsRestjson1_serializeDocumentS3LogDelivery(v *types.S3LogDelivery, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Bucket != nil { + ok := object.Key("bucket") + ok.String(*v.Bucket) + } + + { + ok := object.Key("enabled") + ok.Boolean(v.Enabled) + } + + if v.Prefix != nil { + ok := object.Key("prefix") + ok.String(*v.Prefix) + } + + return nil +} + +func awsRestjson1_serializeDocumentScaleInPolicy(v *types.ScaleInPolicy, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("cpuUtilizationPercentage") + ok.Integer(v.CpuUtilizationPercentage) + } + + return nil +} + +func awsRestjson1_serializeDocumentScaleInPolicyUpdate(v *types.ScaleInPolicyUpdate, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("cpuUtilizationPercentage") + ok.Integer(v.CpuUtilizationPercentage) + } + + return nil +} + +func awsRestjson1_serializeDocumentScaleOutPolicy(v *types.ScaleOutPolicy, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("cpuUtilizationPercentage") + ok.Integer(v.CpuUtilizationPercentage) + } + + return nil +} + +func awsRestjson1_serializeDocumentScaleOutPolicyUpdate(v *types.ScaleOutPolicyUpdate, value 
smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("cpuUtilizationPercentage") + ok.Integer(v.CpuUtilizationPercentage) + } + + return nil +} + +func awsRestjson1_serializeDocumentVpc(v *types.Vpc, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.SecurityGroups != nil { + ok := object.Key("securityGroups") + if err := awsRestjson1_serializeDocument__listOf__string(v.SecurityGroups, ok); err != nil { + return err + } + } + + if v.Subnets != nil { + ok := object.Key("subnets") + if err := awsRestjson1_serializeDocument__listOf__string(v.Subnets, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentWorkerConfiguration(v *types.WorkerConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + { + ok := object.Key("revision") + ok.Long(v.Revision) + } + + if v.WorkerConfigurationArn != nil { + ok := object.Key("workerConfigurationArn") + ok.String(*v.WorkerConfigurationArn) + } + + return nil +} + +func awsRestjson1_serializeDocumentWorkerLogDelivery(v *types.WorkerLogDelivery, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CloudWatchLogs != nil { + ok := object.Key("cloudWatchLogs") + if err := awsRestjson1_serializeDocumentCloudWatchLogsLogDelivery(v.CloudWatchLogs, ok); err != nil { + return err + } + } + + if v.Firehose != nil { + ok := object.Key("firehose") + if err := awsRestjson1_serializeDocumentFirehoseLogDelivery(v.Firehose, ok); err != nil { + return err + } + } + + if v.S3 != nil { + ok := object.Key("s3") + if err := awsRestjson1_serializeDocumentS3LogDelivery(v.S3, ok); err != nil { + return err + } + } + + return nil +} diff --git a/service/kafkaconnect/types/enums.go b/service/kafkaconnect/types/enums.go new file mode 100644 index 00000000000..6491c2c4142 --- /dev/null +++ b/service/kafkaconnect/types/enums.go @@ -0,0 +1,109 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +type ConnectorState string + +// Enum values for ConnectorState +const ( + ConnectorStateRunning ConnectorState = "RUNNING" + ConnectorStateCreating ConnectorState = "CREATING" + ConnectorStateUpdating ConnectorState = "UPDATING" + ConnectorStateDeleting ConnectorState = "DELETING" + ConnectorStateFailed ConnectorState = "FAILED" +) + +// Values returns all known values for ConnectorState. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ConnectorState) Values() []ConnectorState { + return []ConnectorState{ + "RUNNING", + "CREATING", + "UPDATING", + "DELETING", + "FAILED", + } +} + +type CustomPluginContentType string + +// Enum values for CustomPluginContentType +const ( + CustomPluginContentTypeJar CustomPluginContentType = "JAR" + CustomPluginContentTypeZip CustomPluginContentType = "ZIP" +) + +// Values returns all known values for CustomPluginContentType. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (CustomPluginContentType) Values() []CustomPluginContentType { + return []CustomPluginContentType{ + "JAR", + "ZIP", + } +} + +type CustomPluginState string + +// Enum values for CustomPluginState +const ( + CustomPluginStateCreating CustomPluginState = "CREATING" + CustomPluginStateCreateFailed CustomPluginState = "CREATE_FAILED" + CustomPluginStateActive CustomPluginState = "ACTIVE" + CustomPluginStateUpdating CustomPluginState = "UPDATING" + CustomPluginStateUpdateFailed CustomPluginState = "UPDATE_FAILED" + CustomPluginStateDeleting CustomPluginState = "DELETING" +) + +// Values returns all known values for CustomPluginState. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (CustomPluginState) Values() []CustomPluginState { + return []CustomPluginState{ + "CREATING", + "CREATE_FAILED", + "ACTIVE", + "UPDATING", + "UPDATE_FAILED", + "DELETING", + } +} + +type KafkaClusterClientAuthenticationType string + +// Enum values for KafkaClusterClientAuthenticationType +const ( + KafkaClusterClientAuthenticationTypeNone KafkaClusterClientAuthenticationType = "NONE" + KafkaClusterClientAuthenticationTypeIam KafkaClusterClientAuthenticationType = "IAM" +) + +// Values returns all known values for KafkaClusterClientAuthenticationType. Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (KafkaClusterClientAuthenticationType) Values() []KafkaClusterClientAuthenticationType { + return []KafkaClusterClientAuthenticationType{ + "NONE", + "IAM", + } +} + +type KafkaClusterEncryptionInTransitType string + +// Enum values for KafkaClusterEncryptionInTransitType +const ( + KafkaClusterEncryptionInTransitTypePlaintext KafkaClusterEncryptionInTransitType = "PLAINTEXT" + KafkaClusterEncryptionInTransitTypeTls KafkaClusterEncryptionInTransitType = "TLS" +) + +// Values returns all known values for KafkaClusterEncryptionInTransitType. Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (KafkaClusterEncryptionInTransitType) Values() []KafkaClusterEncryptionInTransitType { + return []KafkaClusterEncryptionInTransitType{ + "PLAINTEXT", + "TLS", + } +} diff --git a/service/kafkaconnect/types/errors.go b/service/kafkaconnect/types/errors.go new file mode 100644 index 00000000000..8ff198370af --- /dev/null +++ b/service/kafkaconnect/types/errors.go @@ -0,0 +1,167 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// HTTP Status Code 400: Bad request due to incorrect input. Correct your request +// and then retry it. +type BadRequestException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *BadRequestException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *BadRequestException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *BadRequestException) ErrorCode() string { return "BadRequestException" } +func (e *BadRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// HTTP Status Code 409: Conflict. A resource with this name already exists. Retry +// your request with another name. 
+type ConflictException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ConflictException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ConflictException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ConflictException) ErrorCode() string { return "ConflictException" } +func (e *ConflictException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// HTTP Status Code 403: Access forbidden. Correct your credentials and then retry +// your request. +type ForbiddenException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ForbiddenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ForbiddenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ForbiddenException) ErrorCode() string { return "ForbiddenException" } +func (e *ForbiddenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// HTTP Status Code 500: Unexpected internal server error. Retrying your request +// might resolve the issue. +type InternalServerErrorException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InternalServerErrorException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InternalServerErrorException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InternalServerErrorException) ErrorCode() string { return "InternalServerErrorException" } +func (e *InternalServerErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// HTTP Status Code 404: Resource not found due to incorrect input. Correct your +// request and then retry it. +type NotFoundException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *NotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NotFoundException) ErrorCode() string { return "NotFoundException" } +func (e *NotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// HTTP Status Code 503: Service Unavailable. Retrying your request in some time +// might resolve the issue. +type ServiceUnavailableException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ServiceUnavailableException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ServiceUnavailableException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ServiceUnavailableException) ErrorCode() string { return "ServiceUnavailableException" } +func (e *ServiceUnavailableException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// HTTP Status Code 429: Limit exceeded. Resource limit reached. 
+type TooManyRequestsException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *TooManyRequestsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TooManyRequestsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TooManyRequestsException) ErrorCode() string { return "TooManyRequestsException" } +func (e *TooManyRequestsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be +// validated. +type UnauthorizedException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *UnauthorizedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnauthorizedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnauthorizedException) ErrorCode() string { return "UnauthorizedException" } +func (e *UnauthorizedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/service/kafkaconnect/types/types.go b/service/kafkaconnect/types/types.go new file mode 100644 index 00000000000..02c20217d34 --- /dev/null +++ b/service/kafkaconnect/types/types.go @@ -0,0 +1,813 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// The details of the Apache Kafka cluster to which the connector is connected. +type ApacheKafkaCluster struct { + + // The bootstrap servers of the cluster. + // + // This member is required. + BootstrapServers *string + + // Details of an Amazon VPC which has network connectivity to the Apache Kafka + // cluster. + // + // This member is required. + Vpc *Vpc + + noSmithyDocumentSerde +} + +// The description of the Apache Kafka cluster to which the connector is connected. +type ApacheKafkaClusterDescription struct { + + // The bootstrap servers of the cluster. + BootstrapServers *string + + // Details of an Amazon VPC which has network connectivity to the Apache Kafka + // cluster. + Vpc *VpcDescription + + noSmithyDocumentSerde +} + +// Specifies how the connector scales. +type AutoScaling struct { + + // The maximum number of workers allocated to the connector. + // + // This member is required. + MaxWorkerCount int32 + + // The number of microcontroller units (MCUs) allocated to each connector worker. + // The valid values are 1,2,4,8. + // + // This member is required. + McuCount int32 + + // The minimum number of workers allocated to the connector. + // + // This member is required. + MinWorkerCount int32 + + // The scale-in policy for the connector. + ScaleInPolicy *ScaleInPolicy + + // The scale-out policy for the connector. + ScaleOutPolicy *ScaleOutPolicy + + noSmithyDocumentSerde +} + +// Information about the auto scaling parameters for the connector. +type AutoScalingDescription struct { + + // The maximum number of workers allocated to the connector. + MaxWorkerCount int32 + + // The number of microcontroller units (MCUs) allocated to each connector worker. + // The valid values are 1,2,4,8. + McuCount int32 + + // The minimum number of workers allocated to the connector. + MinWorkerCount int32 + + // The scale-in policy for the connector.
+ ScaleInPolicy *ScaleInPolicyDescription + + // The scale-out policy for the connector. + ScaleOutPolicy *ScaleOutPolicyDescription + + noSmithyDocumentSerde +} + +// The updates to the auto scaling parameters for the connector. +type AutoScalingUpdate struct { + + // The target maximum number of workers allocated to the connector. + // + // This member is required. + MaxWorkerCount int32 + + // The target number of microcontroller units (MCUs) allocated to each connector + // worker. The valid values are 1,2,4,8. + // + // This member is required. + McuCount int32 + + // The target minimum number of workers allocated to the connector. + // + // This member is required. + MinWorkerCount int32 + + // The target scale-in policy for the connector. + // + // This member is required. + ScaleInPolicy *ScaleInPolicyUpdate + + // The target scale-out policy for the connector. + // + // This member is required. + ScaleOutPolicy *ScaleOutPolicyUpdate + + noSmithyDocumentSerde +} + +// Information about the capacity of the connector, whether it is auto scaled or +// provisioned. +type Capacity struct { + + // Information about the auto scaling parameters for the connector. + AutoScaling *AutoScaling + + // Details about a fixed capacity allocated to a connector. + ProvisionedCapacity *ProvisionedCapacity + + noSmithyDocumentSerde +} + +// A description of the connector's capacity. +type CapacityDescription struct { + + // Describes the connector's auto scaling capacity. + AutoScaling *AutoScalingDescription + + // Describes a connector's provisioned capacity. + ProvisionedCapacity *ProvisionedCapacityDescription + + noSmithyDocumentSerde +} + +// The target capacity for the connector. The capacity can be auto scaled or +// provisioned. +type CapacityUpdate struct { + + // The target auto scaling setting. + AutoScaling *AutoScalingUpdate + + // The target settings for provisioned capacity. + ProvisionedCapacity *ProvisionedCapacityUpdate + + noSmithyDocumentSerde +} + +// The settings for delivering connector logs to Amazon CloudWatch Logs. +type CloudWatchLogsLogDelivery struct { + + // Whether log delivery to Amazon CloudWatch Logs is enabled. + // + // This member is required. + Enabled bool + + // The name of the CloudWatch log group that is the destination for log delivery. + LogGroup *string + + noSmithyDocumentSerde +} + +// A description of the log delivery settings. +type CloudWatchLogsLogDeliveryDescription struct { + + // Whether log delivery to Amazon CloudWatch Logs is enabled. + Enabled bool + + // The name of the CloudWatch log group that is the destination for log delivery. + LogGroup *string + + noSmithyDocumentSerde +} + +// Summary of a connector. +type ConnectorSummary struct { + + // The connector's compute capacity settings. + Capacity *CapacityDescription + + // The Amazon Resource Name (ARN) of the connector. + ConnectorArn *string + + // The description of the connector. + ConnectorDescription *string + + // The name of the connector. + ConnectorName *string + + // The state of the connector. + ConnectorState ConnectorState + + // The time that the connector was created. + CreationTime *time.Time + + // The current version of the connector. + CurrentVersion *string + + // The details of the Apache Kafka cluster to which the connector is connected. + KafkaCluster *KafkaClusterDescription + + // The type of client authentication used to connect to the Apache Kafka cluster. + // The value is NONE when no client authentication is used.
+ KafkaClusterClientAuthentication *KafkaClusterClientAuthenticationDescription + + // Details of encryption in transit to the Apache Kafka cluster. + KafkaClusterEncryptionInTransit *KafkaClusterEncryptionInTransitDescription + + // The version of Kafka Connect. It has to be compatible with both the Apache Kafka + // cluster's version and the plugins. + KafkaConnectVersion *string + + // The settings for delivering connector logs to Amazon CloudWatch Logs. + LogDelivery *LogDeliveryDescription + + // Specifies which plugins were used for this connector. + Plugins []PluginDescription + + // The Amazon Resource Name (ARN) of the IAM role used by the connector to access + // Amazon Web Services resources. + ServiceExecutionRoleArn *string + + // The worker configurations that are in use with the connector. + WorkerConfiguration *WorkerConfigurationDescription + + noSmithyDocumentSerde +} + +// A plugin is an AWS resource that contains the code that defines a connector's +// logic. +type CustomPlugin struct { + + // The Amazon Resource Name (ARN) of the custom plugin. + // + // This member is required. + CustomPluginArn *string + + // The revision of the custom plugin. + // + // This member is required. + Revision int64 + + noSmithyDocumentSerde +} + +// Details about a custom plugin. +type CustomPluginDescription struct { + + // The Amazon Resource Name (ARN) of the custom plugin. + CustomPluginArn *string + + // The revision of the custom plugin. + Revision int64 + + noSmithyDocumentSerde +} + +// Details about a custom plugin file. +type CustomPluginFileDescription struct { + + // The hex-encoded MD5 checksum of the custom plugin file. You can use it to + // validate the file. + FileMd5 *string + + // The size in bytes of the custom plugin file. You can use it to validate the + // file. + FileSize int64 + + noSmithyDocumentSerde +} + +// Information about the location of a custom plugin. +type CustomPluginLocation struct { + + // The S3 bucket Amazon Resource Name (ARN), file key, and object version of the + // plugin file stored in Amazon S3. + // + // This member is required. + S3Location *S3Location + + noSmithyDocumentSerde +} + +// Information about the location of a custom plugin. +type CustomPluginLocationDescription struct { + + // The S3 bucket Amazon Resource Name (ARN), file key, and object version of the + // plugin file stored in Amazon S3. + S3Location *S3LocationDescription + + noSmithyDocumentSerde +} + +// Details about the revision of a custom plugin. +type CustomPluginRevisionSummary struct { + + // The format of the plugin file. + ContentType CustomPluginContentType + + // The time that the custom plugin was created. + CreationTime *time.Time + + // The description of the custom plugin. + Description *string + + // Details about the custom plugin file. + FileDescription *CustomPluginFileDescription + + // Information about the location of the custom plugin. + Location *CustomPluginLocationDescription + + // The revision of the custom plugin. + Revision int64 + + noSmithyDocumentSerde +} + +// A summary of the custom plugin. +type CustomPluginSummary struct { + + // The time that the custom plugin was created. + CreationTime *time.Time + + // The Amazon Resource Name (ARN) of the custom plugin. + CustomPluginArn *string + + // The state of the custom plugin. + CustomPluginState CustomPluginState + + // A description of the custom plugin. + Description *string + + // The latest revision of the custom plugin. 
+ LatestRevision *CustomPluginRevisionSummary + + // The name of the custom plugin. + Name *string + + noSmithyDocumentSerde +} + +// The settings for delivering logs to Amazon Kinesis Data Firehose. +type FirehoseLogDelivery struct { + + // Specifies whether connector logs get delivered to Amazon Kinesis Data Firehose. + // + // This member is required. + Enabled bool + + // The name of the Kinesis Data Firehose delivery stream that is the destination + // for log delivery. + DeliveryStream *string + + noSmithyDocumentSerde +} + +// A description of the settings for delivering logs to Amazon Kinesis Data +// Firehose. +type FirehoseLogDeliveryDescription struct { + + // The name of the Kinesis Data Firehose delivery stream that is the destination + // for log delivery. + DeliveryStream *string + + // Specifies whether connector logs get delivered to Amazon Kinesis Data Firehose. + Enabled bool + + noSmithyDocumentSerde +} + +// The details of the Apache Kafka cluster to which the connector is connected. +type KafkaCluster struct { + + // The Apache Kafka cluster to which the connector is connected. + // + // This member is required. + ApacheKafkaCluster *ApacheKafkaCluster + + noSmithyDocumentSerde +} + +// The client authentication information used in order to authenticate with the +// Apache Kafka cluster. +type KafkaClusterClientAuthentication struct { + + // The type of client authentication used to connect to the Apache Kafka cluster. + // Value NONE means that no client authentication is used. + // + // This member is required. + AuthenticationType KafkaClusterClientAuthenticationType + + noSmithyDocumentSerde +} + +// The client authentication information used in order to authenticate with the +// Apache Kafka cluster. +type KafkaClusterClientAuthenticationDescription struct { + + // The type of client authentication used to connect to the Apache Kafka cluster. + // Value NONE means that no client authentication is used. + AuthenticationType KafkaClusterClientAuthenticationType + + noSmithyDocumentSerde +} + +// Details of how to connect to the Apache Kafka cluster. +type KafkaClusterDescription struct { + + // The Apache Kafka cluster to which the connector is connected. + ApacheKafkaCluster *ApacheKafkaClusterDescription + + noSmithyDocumentSerde +} + +// Details of encryption in transit to the Apache Kafka cluster. +type KafkaClusterEncryptionInTransit struct { + + // The type of encryption in transit to the Apache Kafka cluster. + // + // This member is required. + EncryptionType KafkaClusterEncryptionInTransitType + + noSmithyDocumentSerde +} + +// The description of the encryption in transit to the Apache Kafka cluster. +type KafkaClusterEncryptionInTransitDescription struct { + + // The type of encryption in transit to the Apache Kafka cluster. + EncryptionType KafkaClusterEncryptionInTransitType + + noSmithyDocumentSerde +} + +// Details about log delivery. +type LogDelivery struct { + + // The workers can send worker logs to different destination types. This + // configuration specifies the details of these destinations. + // + // This member is required. + WorkerLogDelivery *WorkerLogDelivery + + noSmithyDocumentSerde +} + +// The description of the log delivery settings. +type LogDeliveryDescription struct { + + // The workers can send worker logs to different destination types. This + // configuration specifies the details of these destinations. 
+ WorkerLogDelivery *WorkerLogDeliveryDescription + + noSmithyDocumentSerde +} + +// A plugin is an AWS resource that contains the code that defines your connector +// logic. +type Plugin struct { + + // Details about a custom plugin. + // + // This member is required. + CustomPlugin *CustomPlugin + + noSmithyDocumentSerde +} + +// The description of the plugin. +type PluginDescription struct { + + // Details about a custom plugin. + CustomPlugin *CustomPluginDescription + + noSmithyDocumentSerde +} + +// Details about a connector's provisioned capacity. +type ProvisionedCapacity struct { + + // The number of microcontroller units (MCUs) allocated to each connector worker. + // The valid values are 1,2,4,8. + // + // This member is required. + McuCount int32 + + // The number of workers that are allocated to the connector. + // + // This member is required. + WorkerCount int32 + + noSmithyDocumentSerde +} + +// The description of a connector's provisioned capacity. +type ProvisionedCapacityDescription struct { + + // The number of microcontroller units (MCUs) allocated to each connector worker. + // The valid values are 1,2,4,8. + McuCount int32 + + // The number of workers that are allocated to the connector. + WorkerCount int32 + + noSmithyDocumentSerde +} + +// An update to a connector's fixed capacity. +type ProvisionedCapacityUpdate struct { + + // The number of microcontroller units (MCUs) allocated to each connector worker. + // The valid values are 1,2,4,8. + // + // This member is required. + McuCount int32 + + // The number of workers that are allocated to the connector. + // + // This member is required. + WorkerCount int32 + + noSmithyDocumentSerde +} + +// The location of an object in Amazon S3. +type S3Location struct { + + // The Amazon Resource Name (ARN) of an S3 bucket. + // + // This member is required. + BucketArn *string + + // The file key for an object in an S3 bucket. + // + // This member is required. + FileKey *string + + // The version of an object in an S3 bucket. + ObjectVersion *string + + noSmithyDocumentSerde +} + +// The description of the location of an object in Amazon S3. +type S3LocationDescription struct { + + // The Amazon Resource Name (ARN) of an S3 bucket. + BucketArn *string + + // The file key for an object in an S3 bucket. + FileKey *string + + // The version of an object in an S3 bucket. + ObjectVersion *string + + noSmithyDocumentSerde +} + +// Details about delivering logs to Amazon S3. +type S3LogDelivery struct { + + // Specifies whether connector logs get sent to the specified Amazon S3 + // destination. + // + // This member is required. + Enabled bool + + // The name of the S3 bucket that is the destination for log delivery. + Bucket *string + + // The S3 prefix that is the destination for log delivery. + Prefix *string + + noSmithyDocumentSerde +} + +// The description of the details about delivering logs to Amazon S3. +type S3LogDeliveryDescription struct { + + // The name of the S3 bucket that is the destination for log delivery. + Bucket *string + + // Specifies whether connector logs get sent to the specified Amazon S3 + // destination. + Enabled bool + + // The S3 prefix that is the destination for log delivery. + Prefix *string + + noSmithyDocumentSerde +} + +// The scale-in policy for the connector. +type ScaleInPolicy struct { + + // Specifies the CPU utilization percentage threshold at which you want connector + // scale in to be triggered. + // + // This member is required. 
+ CpuUtilizationPercentage int32 + + noSmithyDocumentSerde +} + +// The description of the scale-in policy for the connector. +type ScaleInPolicyDescription struct { + + // Specifies the CPU utilization percentage threshold at which you want connector + // scale in to be triggered. + CpuUtilizationPercentage int32 + + noSmithyDocumentSerde +} + +// An update to the connector's scale-in policy. +type ScaleInPolicyUpdate struct { + + // The target CPU utilization percentage threshold at which you want connector + // scale in to be triggered. + // + // This member is required. + CpuUtilizationPercentage int32 + + noSmithyDocumentSerde +} + +// The scale-out policy for the connector. +type ScaleOutPolicy struct { + + // The CPU utilization percentage threshold at which you want connector scale out + // to be triggered. + // + // This member is required. + CpuUtilizationPercentage int32 + + noSmithyDocumentSerde +} + +// The description of the scale-out policy for the connector. +type ScaleOutPolicyDescription struct { + + // The CPU utilization percentage threshold at which you want connector scale out + // to be triggered. + CpuUtilizationPercentage int32 + + noSmithyDocumentSerde +} + +// An update to the connector's scale-out policy. +type ScaleOutPolicyUpdate struct { + + // The target CPU utilization percentage threshold at which you want connector + // scale out to be triggered. + // + // This member is required. + CpuUtilizationPercentage int32 + + noSmithyDocumentSerde +} + +// Information about the VPC in which the connector resides. +type Vpc struct { + + // The subnets for the connector. + // + // This member is required. + Subnets []string + + // The security groups for the connector. + SecurityGroups []string + + noSmithyDocumentSerde +} + +// The description of the VPC in which the connector resides. +type VpcDescription struct { + + // The security groups for the connector. + SecurityGroups []string + + // The subnets for the connector. + Subnets []string + + noSmithyDocumentSerde +} + +// The configuration of the workers, which are the processes that run the connector +// logic. +type WorkerConfiguration struct { + + // The revision of the worker configuration. + // + // This member is required. + Revision int64 + + // The Amazon Resource Name (ARN) of the worker configuration. + // + // This member is required. + WorkerConfigurationArn *string + + noSmithyDocumentSerde +} + +// The description of the worker configuration. +type WorkerConfigurationDescription struct { + + // The revision of the worker configuration. + Revision int64 + + // The Amazon Resource Name (ARN) of the worker configuration. + WorkerConfigurationArn *string + + noSmithyDocumentSerde +} + +// The description of the worker configuration revision. +type WorkerConfigurationRevisionDescription struct { + + // The time that the worker configuration was created. + CreationTime *time.Time + + // The description of the worker configuration revision. + Description *string + + // Base64 encoded contents of the connect-distributed.properties file. + PropertiesFileContent *string + + // The description of a revision of the worker configuration. + Revision int64 + + noSmithyDocumentSerde +} + +// The summary of a worker configuration revision. +type WorkerConfigurationRevisionSummary struct { + + // The time that a worker configuration revision was created. + CreationTime *time.Time + + // The description of a worker configuration revision. + Description *string + + // The revision of a worker configuration. 
+ Revision int64 + + noSmithyDocumentSerde +} + +// The summary of a worker configuration. +type WorkerConfigurationSummary struct { + + // The time that a worker configuration was created. + CreationTime *time.Time + + // The description of a worker configuration. + Description *string + + // The latest revision of a worker configuration. + LatestRevision *WorkerConfigurationRevisionSummary + + // The name of the worker configuration. + Name *string + + // The Amazon Resource Name (ARN) of the worker configuration. + WorkerConfigurationArn *string + + noSmithyDocumentSerde +} + +// Workers can send worker logs to different destination types. This configuration +// specifies the details of these destinations. +type WorkerLogDelivery struct { + + // Details about delivering logs to Amazon CloudWatch Logs. + CloudWatchLogs *CloudWatchLogsLogDelivery + + // Details about delivering logs to Amazon Kinesis Data Firehose. + Firehose *FirehoseLogDelivery + + // Details about delivering logs to Amazon S3. + S3 *S3LogDelivery + + noSmithyDocumentSerde +} + +// Workers can send worker logs to different destination types. This configuration +// specifies the details of these destinations. +type WorkerLogDeliveryDescription struct { + + // Details about delivering logs to Amazon CloudWatch Logs. + CloudWatchLogs *CloudWatchLogsLogDeliveryDescription + + // Details about delivering logs to Amazon Kinesis Data Firehose. + Firehose *FirehoseLogDeliveryDescription + + // Details about delivering logs to Amazon S3. + S3 *S3LogDeliveryDescription + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/service/kafkaconnect/validators.go b/service/kafkaconnect/validators.go new file mode 100644 index 00000000000..df8ab7e4b15 --- /dev/null +++ b/service/kafkaconnect/validators.go @@ -0,0 +1,835 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package kafkaconnect + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpCreateConnector struct { +} + +func (*validateOpCreateConnector) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateConnector) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateConnectorInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateConnectorInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateCustomPlugin struct { +} + +func (*validateOpCreateCustomPlugin) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateCustomPlugin) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateCustomPluginInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateCustomPluginInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateWorkerConfiguration struct { +} + +func (*validateOpCreateWorkerConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateWorkerConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateWorkerConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateWorkerConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteConnector struct { +} + +func (*validateOpDeleteConnector) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteConnector) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteConnectorInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteConnectorInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeConnector struct { +} + +func (*validateOpDescribeConnector) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeConnector) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeConnectorInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeConnectorInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type 
validateOpDescribeCustomPlugin struct { +} + +func (*validateOpDescribeCustomPlugin) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeCustomPlugin) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeCustomPluginInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeCustomPluginInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeWorkerConfiguration struct { +} + +func (*validateOpDescribeWorkerConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeWorkerConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeWorkerConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeWorkerConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateConnector struct { +} + +func (*validateOpUpdateConnector) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateConnector) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateConnectorInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateConnectorInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpCreateConnectorValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateConnector{}, middleware.After) +} + +func addOpCreateCustomPluginValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateCustomPlugin{}, middleware.After) +} + +func addOpCreateWorkerConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateWorkerConfiguration{}, middleware.After) +} + +func addOpDeleteConnectorValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteConnector{}, middleware.After) +} + +func addOpDescribeConnectorValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeConnector{}, middleware.After) +} + +func addOpDescribeCustomPluginValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeCustomPlugin{}, middleware.After) +} + +func addOpDescribeWorkerConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeWorkerConfiguration{}, middleware.After) +} + +func addOpUpdateConnectorValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateConnector{}, middleware.After) +} + +func validate__listOfPlugin(v []types.Plugin) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: 
"ListOfPlugin"} + for i := range v { + if err := validatePlugin(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateApacheKafkaCluster(v *types.ApacheKafkaCluster) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ApacheKafkaCluster"} + if v.BootstrapServers == nil { + invalidParams.Add(smithy.NewErrParamRequired("BootstrapServers")) + } + if v.Vpc == nil { + invalidParams.Add(smithy.NewErrParamRequired("Vpc")) + } else if v.Vpc != nil { + if err := validateVpc(v.Vpc); err != nil { + invalidParams.AddNested("Vpc", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAutoScaling(v *types.AutoScaling) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AutoScaling"} + if v.ScaleInPolicy != nil { + if err := validateScaleInPolicy(v.ScaleInPolicy); err != nil { + invalidParams.AddNested("ScaleInPolicy", err.(smithy.InvalidParamsError)) + } + } + if v.ScaleOutPolicy != nil { + if err := validateScaleOutPolicy(v.ScaleOutPolicy); err != nil { + invalidParams.AddNested("ScaleOutPolicy", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateAutoScalingUpdate(v *types.AutoScalingUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AutoScalingUpdate"} + if v.ScaleInPolicy == nil { + invalidParams.Add(smithy.NewErrParamRequired("ScaleInPolicy")) + } else if v.ScaleInPolicy != nil { + if err := validateScaleInPolicyUpdate(v.ScaleInPolicy); err != nil { + invalidParams.AddNested("ScaleInPolicy", err.(smithy.InvalidParamsError)) + } + } + if v.ScaleOutPolicy == nil { + invalidParams.Add(smithy.NewErrParamRequired("ScaleOutPolicy")) + } else if v.ScaleOutPolicy != nil { + if err := validateScaleOutPolicyUpdate(v.ScaleOutPolicy); err != nil { + invalidParams.AddNested("ScaleOutPolicy", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCapacity(v *types.Capacity) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Capacity"} + if v.AutoScaling != nil { + if err := validateAutoScaling(v.AutoScaling); err != nil { + invalidParams.AddNested("AutoScaling", err.(smithy.InvalidParamsError)) + } + } + if v.ProvisionedCapacity != nil { + if err := validateProvisionedCapacity(v.ProvisionedCapacity); err != nil { + invalidParams.AddNested("ProvisionedCapacity", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCapacityUpdate(v *types.CapacityUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CapacityUpdate"} + if v.AutoScaling != nil { + if err := validateAutoScalingUpdate(v.AutoScaling); err != nil { + invalidParams.AddNested("AutoScaling", err.(smithy.InvalidParamsError)) + } + } + if v.ProvisionedCapacity != nil { + if err := validateProvisionedCapacityUpdate(v.ProvisionedCapacity); err != nil { + invalidParams.AddNested("ProvisionedCapacity", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + 
+func validateCloudWatchLogsLogDelivery(v *types.CloudWatchLogsLogDelivery) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CloudWatchLogsLogDelivery"} + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCustomPlugin(v *types.CustomPlugin) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CustomPlugin"} + if v.CustomPluginArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomPluginArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateCustomPluginLocation(v *types.CustomPluginLocation) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CustomPluginLocation"} + if v.S3Location == nil { + invalidParams.Add(smithy.NewErrParamRequired("S3Location")) + } else if v.S3Location != nil { + if err := validateS3Location(v.S3Location); err != nil { + invalidParams.AddNested("S3Location", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateFirehoseLogDelivery(v *types.FirehoseLogDelivery) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "FirehoseLogDelivery"} + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateKafkaCluster(v *types.KafkaCluster) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "KafkaCluster"} + if v.ApacheKafkaCluster == nil { + invalidParams.Add(smithy.NewErrParamRequired("ApacheKafkaCluster")) + } else if v.ApacheKafkaCluster != nil { + if err := validateApacheKafkaCluster(v.ApacheKafkaCluster); err != nil { + invalidParams.AddNested("ApacheKafkaCluster", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateKafkaClusterClientAuthentication(v *types.KafkaClusterClientAuthentication) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "KafkaClusterClientAuthentication"} + if len(v.AuthenticationType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("AuthenticationType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateKafkaClusterEncryptionInTransit(v *types.KafkaClusterEncryptionInTransit) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "KafkaClusterEncryptionInTransit"} + if len(v.EncryptionType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("EncryptionType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateLogDelivery(v *types.LogDelivery) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LogDelivery"} + if v.WorkerLogDelivery == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkerLogDelivery")) + } else if v.WorkerLogDelivery != nil { + if err := validateWorkerLogDelivery(v.WorkerLogDelivery); err != nil { + invalidParams.AddNested("WorkerLogDelivery", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validatePlugin(v *types.Plugin) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Plugin"} + if v.CustomPlugin == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("CustomPlugin")) + } else if v.CustomPlugin != nil { + if err := validateCustomPlugin(v.CustomPlugin); err != nil { + invalidParams.AddNested("CustomPlugin", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateProvisionedCapacity(v *types.ProvisionedCapacity) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ProvisionedCapacity"} + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateProvisionedCapacityUpdate(v *types.ProvisionedCapacityUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ProvisionedCapacityUpdate"} + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateS3Location(v *types.S3Location) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "S3Location"} + if v.BucketArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("BucketArn")) + } + if v.FileKey == nil { + invalidParams.Add(smithy.NewErrParamRequired("FileKey")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateS3LogDelivery(v *types.S3LogDelivery) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "S3LogDelivery"} + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateScaleInPolicy(v *types.ScaleInPolicy) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ScaleInPolicy"} + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateScaleInPolicyUpdate(v *types.ScaleInPolicyUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ScaleInPolicyUpdate"} + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateScaleOutPolicy(v *types.ScaleOutPolicy) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ScaleOutPolicy"} + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateScaleOutPolicyUpdate(v *types.ScaleOutPolicyUpdate) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ScaleOutPolicyUpdate"} + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateVpc(v *types.Vpc) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Vpc"} + if v.Subnets == nil { + invalidParams.Add(smithy.NewErrParamRequired("Subnets")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateWorkerConfiguration(v *types.WorkerConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "WorkerConfiguration"} + if v.WorkerConfigurationArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkerConfigurationArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateWorkerLogDelivery(v *types.WorkerLogDelivery) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "WorkerLogDelivery"} + if v.CloudWatchLogs != nil { + if err := 
validateCloudWatchLogsLogDelivery(v.CloudWatchLogs); err != nil { + invalidParams.AddNested("CloudWatchLogs", err.(smithy.InvalidParamsError)) + } + } + if v.Firehose != nil { + if err := validateFirehoseLogDelivery(v.Firehose); err != nil { + invalidParams.AddNested("Firehose", err.(smithy.InvalidParamsError)) + } + } + if v.S3 != nil { + if err := validateS3LogDelivery(v.S3); err != nil { + invalidParams.AddNested("S3", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateConnectorInput(v *CreateConnectorInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateConnectorInput"} + if v.Capacity == nil { + invalidParams.Add(smithy.NewErrParamRequired("Capacity")) + } else if v.Capacity != nil { + if err := validateCapacity(v.Capacity); err != nil { + invalidParams.AddNested("Capacity", err.(smithy.InvalidParamsError)) + } + } + if v.ConnectorConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("ConnectorConfiguration")) + } + if v.ConnectorName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ConnectorName")) + } + if v.KafkaCluster == nil { + invalidParams.Add(smithy.NewErrParamRequired("KafkaCluster")) + } else if v.KafkaCluster != nil { + if err := validateKafkaCluster(v.KafkaCluster); err != nil { + invalidParams.AddNested("KafkaCluster", err.(smithy.InvalidParamsError)) + } + } + if v.KafkaClusterClientAuthentication == nil { + invalidParams.Add(smithy.NewErrParamRequired("KafkaClusterClientAuthentication")) + } else if v.KafkaClusterClientAuthentication != nil { + if err := validateKafkaClusterClientAuthentication(v.KafkaClusterClientAuthentication); err != nil { + invalidParams.AddNested("KafkaClusterClientAuthentication", err.(smithy.InvalidParamsError)) + } + } + if v.KafkaClusterEncryptionInTransit == nil { + invalidParams.Add(smithy.NewErrParamRequired("KafkaClusterEncryptionInTransit")) + } else if v.KafkaClusterEncryptionInTransit != nil { + if err := validateKafkaClusterEncryptionInTransit(v.KafkaClusterEncryptionInTransit); err != nil { + invalidParams.AddNested("KafkaClusterEncryptionInTransit", err.(smithy.InvalidParamsError)) + } + } + if v.KafkaConnectVersion == nil { + invalidParams.Add(smithy.NewErrParamRequired("KafkaConnectVersion")) + } + if v.LogDelivery != nil { + if err := validateLogDelivery(v.LogDelivery); err != nil { + invalidParams.AddNested("LogDelivery", err.(smithy.InvalidParamsError)) + } + } + if v.Plugins == nil { + invalidParams.Add(smithy.NewErrParamRequired("Plugins")) + } else if v.Plugins != nil { + if err := validate__listOfPlugin(v.Plugins); err != nil { + invalidParams.AddNested("Plugins", err.(smithy.InvalidParamsError)) + } + } + if v.ServiceExecutionRoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ServiceExecutionRoleArn")) + } + if v.WorkerConfiguration != nil { + if err := validateWorkerConfiguration(v.WorkerConfiguration); err != nil { + invalidParams.AddNested("WorkerConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateCustomPluginInput(v *CreateCustomPluginInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateCustomPluginInput"} + if len(v.ContentType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ContentType")) + } + if v.Location == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("Location")) + } else if v.Location != nil { + if err := validateCustomPluginLocation(v.Location); err != nil { + invalidParams.AddNested("Location", err.(smithy.InvalidParamsError)) + } + } + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateWorkerConfigurationInput(v *CreateWorkerConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateWorkerConfigurationInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.PropertiesFileContent == nil { + invalidParams.Add(smithy.NewErrParamRequired("PropertiesFileContent")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteConnectorInput(v *DeleteConnectorInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteConnectorInput"} + if v.ConnectorArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ConnectorArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeConnectorInput(v *DescribeConnectorInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeConnectorInput"} + if v.ConnectorArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ConnectorArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeCustomPluginInput(v *DescribeCustomPluginInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeCustomPluginInput"} + if v.CustomPluginArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomPluginArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeWorkerConfigurationInput(v *DescribeWorkerConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeWorkerConfigurationInput"} + if v.WorkerConfigurationArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("WorkerConfigurationArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateConnectorInput(v *UpdateConnectorInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateConnectorInput"} + if v.Capacity == nil { + invalidParams.Add(smithy.NewErrParamRequired("Capacity")) + } else if v.Capacity != nil { + if err := validateCapacityUpdate(v.Capacity); err != nil { + invalidParams.AddNested("Capacity", err.(smithy.InvalidParamsError)) + } + } + if v.ConnectorArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ConnectorArn")) + } + if v.CurrentVersion == nil { + invalidParams.Add(smithy.NewErrParamRequired("CurrentVersion")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/service/lexmodelsv2/internal/endpoints/endpoints.go b/service/lexmodelsv2/internal/endpoints/endpoints.go index b36c0b4dfe3..67c37c372ff 100644 --- a/service/lexmodelsv2/internal/endpoints/endpoints.go +++ b/service/lexmodelsv2/internal/endpoints/endpoints.go @@ -62,6 +62,17 @@ var defaultPartitions = endpoints.Partitions{ }, RegionRegex: partitionRegexp.Aws, IsRegionalized: 
true, + Endpoints: endpoints.Endpoints{ + "ap-northeast-1": endpoints.Endpoint{}, + "ap-southeast-1": endpoints.Endpoint{}, + "ap-southeast-2": endpoints.Endpoint{}, + "ca-central-1": endpoints.Endpoint{}, + "eu-central-1": endpoints.Endpoint{}, + "eu-west-1": endpoints.Endpoint{}, + "eu-west-2": endpoints.Endpoint{}, + "us-east-1": endpoints.Endpoint{}, + "us-west-2": endpoints.Endpoint{}, + }, }, { ID: "aws-cn", diff --git a/service/lexruntimev2/internal/endpoints/endpoints.go b/service/lexruntimev2/internal/endpoints/endpoints.go index 6377d85becd..59eaf2230f7 100644 --- a/service/lexruntimev2/internal/endpoints/endpoints.go +++ b/service/lexruntimev2/internal/endpoints/endpoints.go @@ -62,6 +62,17 @@ var defaultPartitions = endpoints.Partitions{ }, RegionRegex: partitionRegexp.Aws, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + "ap-northeast-1": endpoints.Endpoint{}, + "ap-southeast-1": endpoints.Endpoint{}, + "ap-southeast-2": endpoints.Endpoint{}, + "ca-central-1": endpoints.Endpoint{}, + "eu-central-1": endpoints.Endpoint{}, + "eu-west-1": endpoints.Endpoint{}, + "eu-west-2": endpoints.Endpoint{}, + "us-east-1": endpoints.Endpoint{}, + "us-west-2": endpoints.Endpoint{}, + }, }, { ID: "aws-cn", diff --git a/service/macie2/api_op_BatchGetCustomDataIdentifiers.go b/service/macie2/api_op_BatchGetCustomDataIdentifiers.go index a62fecb3273..ed53baa03e1 100644 --- a/service/macie2/api_op_BatchGetCustomDataIdentifiers.go +++ b/service/macie2/api_op_BatchGetCustomDataIdentifiers.go @@ -29,8 +29,8 @@ func (c *Client) BatchGetCustomDataIdentifiers(ctx context.Context, params *Batc type BatchGetCustomDataIdentifiersInput struct { - // An array of strings that lists the unique identifiers for the custom data - // identifiers to retrieve information about. + // An array of custom data identifier IDs, one for each custom data identifier to + // retrieve information about. Ids []string noSmithyDocumentSerde @@ -42,8 +42,9 @@ type BatchGetCustomDataIdentifiersOutput struct { // specified in the request. CustomDataIdentifiers []types.BatchGetCustomDataIdentifierSummary - // An array of identifiers, one for each identifier that was specified in the - // request, but doesn't correlate to an existing custom data identifier. + // An array of custom data identifier IDs, one for each custom data identifier that + // was specified in the request but doesn't correlate to an existing custom data + // identifier. NotFoundIdentifierIds []string // Metadata pertaining to the operation's result. diff --git a/service/macie2/api_op_CreateClassificationJob.go b/service/macie2/api_op_CreateClassificationJob.go index 6e785e42d13..ce23fd22cae 100644 --- a/service/macie2/api_op_CreateClassificationJob.go +++ b/service/macie2/api_op_CreateClassificationJob.go @@ -60,22 +60,61 @@ type CreateClassificationJobInput struct { // This member is required. S3JobDefinition *types.S3JobDefinition - // The custom data identifiers to use for data analysis and classification. + // An array of unique identifiers, one for each custom data identifier for the job + // to use when it analyzes data. To use only managed data identifiers, don't + // specify a value for this property and specify a value other than NONE for the + // managedDataIdentifierSelector property. CustomDataIdentifierIds []string // A custom description of the job. The description can contain as many as 200 // characters. Description *string - // Specifies whether to analyze all existing, eligible objects immediately after - // the job is created. 
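An illustrative sketch, not part of the generated code above, of how a caller might exercise the new managed data identifier options on CreateClassificationJob once this revision ships. The job name, client token, account ID, and bucket are placeholders, and the JobType/S3JobDefinition fields are assumed to keep their existing shapes; only ManagedDataIdentifierSelector and ManagedDataIdentifierIds come from this diff.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/macie2"
	"github.com/aws/aws-sdk-go-v2/service/macie2/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := macie2.NewFromConfig(cfg)

	// One-time job that uses every managed data identifier except the two IDs
	// listed below (the EXCLUDE selection type introduced in this revision).
	out, err := client.CreateClassificationJob(ctx, &macie2.CreateClassificationJobInput{
		ClientToken: aws.String("example-client-token"), // placeholder idempotency token
		JobType:     types.JobTypeOneTime,
		Name:        aws.String("example-classification-job"), // placeholder name
		S3JobDefinition: &types.S3JobDefinition{
			BucketDefinitions: []types.S3BucketDefinitionForJob{{
				AccountId: aws.String("111122223333"), // placeholder account ID
				Buckets:   []string{"example-bucket"}, // placeholder bucket
			}},
		},
		ManagedDataIdentifierSelector: types.ManagedDataIdentifierSelectorExclude,
		ManagedDataIdentifierIds:      []string{"CREDIT_CARD_NUMBER", "USA_PASSPORT_NUMBER"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created classification job %s", aws.ToString(out.JobId))
}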
+ // For a recurring job, specifies whether to analyze all existing, eligible objects + // immediately after the job is created (true). To analyze only those objects that + // are created or changed after you create the job and before the job's first + // scheduled run, set this value to false.If you configure the job to run only + // once, don't specify a value for this property. InitialRun bool - // The sampling depth, as a percentage, to apply when processing objects. This - // value determines the percentage of eligible objects that the job analyzes. If - // this value is less than 100, Amazon Macie selects the objects to analyze at - // random, up to the specified percentage, and analyzes all the data in those - // objects. + // An array of unique identifiers, one for each managed data identifier for the job + // to include (use) or exclude (not use) when it analyzes data. Inclusion or + // exclusion depends on the managed data identifier selection type that you specify + // for the job (managedDataIdentifierSelector).To retrieve a list of valid values + // for this property, use the ListManagedDataIdentifiers operation. + ManagedDataIdentifierIds []string + + // The selection type to apply when determining which managed data identifiers the + // job uses to analyze data. Valid values are: + // + // * ALL - Use all the managed data + // identifiers that Amazon Macie provides. If you specify this value, don't specify + // any values for the managedDataIdentifierIds property. + // + // * EXCLUDE - Use all the + // managed data identifiers that Macie provides except the managed data identifiers + // specified by the managedDataIdentifierIds property. + // + // * INCLUDE - Use only the + // managed data identifiers specified by the managedDataIdentifierIds property. + // + // * + // NONE - Don't use any managed data identifiers. If you specify this value, + // specify at least one custom data identifier for the job + // (customDataIdentifierIds) and don't specify any values for the + // managedDataIdentifierIds property. + // + // If you don't specify a value for this + // property, the job uses all managed data identifiers. If you don't specify a + // value for this property or you specify ALL or EXCLUDE for a recurring job, the + // job also uses new managed data identifiers as they are released. + ManagedDataIdentifierSelector types.ManagedDataIdentifierSelector + + // The sampling depth, as a percentage, for the job to apply when processing + // objects. This value determines the percentage of eligible objects that the job + // analyzes. If this value is less than 100, Amazon Macie selects the objects to + // analyze at random, up to the specified percentage, and analyzes all the data in + // those objects. SamplingPercentage int32 // The recurrence pattern for running the job. To run the job only once, don't diff --git a/service/macie2/api_op_CreateCustomDataIdentifier.go b/service/macie2/api_op_CreateCustomDataIdentifier.go index a4d1bca1c48..a0ca148c542 100644 --- a/service/macie2/api_op_CreateCustomDataIdentifier.go +++ b/service/macie2/api_op_CreateCustomDataIdentifier.go @@ -44,21 +44,21 @@ type CreateCustomDataIdentifierInput struct { // An array that lists specific character sequences (ignore words) to exclude from // the results. If the text matched by the regular expression is the same as any // string in this array, Amazon Macie ignores it. The array can contain as many as - // 10 ignore words. Each ignore word can contain 4-90 characters. 
Ignore words are - // case sensitive. + // 10 ignore words. Each ignore word can contain 4-90 UTF-8 characters. Ignore + // words are case sensitive. IgnoreWords []string // An array that lists specific character sequences (keywords), one of which must // be within proximity (maximumMatchDistance) of the regular expression to match. // The array can contain as many as 50 keywords. Each keyword can contain 3-90 - // characters. Keywords aren't case sensitive. + // UTF-8 characters. Keywords aren't case sensitive. Keywords []string // The maximum number of characters that can exist between text that matches the - // regex pattern and the character sequences specified by the keywords array. Macie - // includes or excludes a result based on the proximity of a keyword to text that - // matches the regex pattern. The distance can be 1-300 characters. The default - // value is 50. + // regex pattern and the character sequences specified by the keywords array. + // Amazon Macie includes or excludes a result based on the proximity of a keyword + // to text that matches the regex pattern. The distance can be 1-300 characters. + // The default value is 50. MaximumMatchDistance int32 // A custom name for the custom data identifier. The name can contain as many as diff --git a/service/macie2/api_op_DescribeClassificationJob.go b/service/macie2/api_op_DescribeClassificationJob.go index 3cd2a9b4fd4..f0ab48af128 100644 --- a/service/macie2/api_op_DescribeClassificationJob.go +++ b/service/macie2/api_op_DescribeClassificationJob.go @@ -48,14 +48,19 @@ type DescribeClassificationJobOutput struct { // created. CreatedAt *time.Time - // The custom data identifiers that the job uses to analyze data. + // An array of unique identifiers, one for each custom data identifier that the job + // uses to analyze data. This value is null if the job uses only managed data + // identifiers to analyze data. CustomDataIdentifierIds []string // The custom description of the job. Description *string - // Specifies whether the job is configured to analyze all existing, eligible - // objects immediately after it's created. + // For a recurring job, specifies whether you configured the job to analyze all + // existing, eligible objects immediately after the job was created (true). If you + // configured the job to analyze only those objects that were created or changed + // after the job was created and before the job's first scheduled run, this value + // is false. This value is also false for a one-time job. InitialRun bool // The Amazon Resource Name (ARN) of the job. @@ -78,18 +83,18 @@ type DescribeClassificationJobOutput struct { // the next scheduled run is pending. This value doesn't apply to one-time jobs. // // * - // PAUSED - Amazon Macie started running the job but additional processing would - // exceed the monthly sensitive data discovery quota for your account or one or - // more member accounts that the job analyzes data for. + // PAUSED - Macie started running the job but additional processing would exceed + // the monthly sensitive data discovery quota for your account or one or more + // member accounts that the job analyzes data for. // - // * RUNNING - For a one-time - // job, the job is in progress. For a recurring job, a scheduled run is in - // progress. + // * RUNNING - For a one-time job, + // the job is in progress. For a recurring job, a scheduled run is in progress. // - // * USER_PAUSED - You paused the job. 
If you paused the job while it - // had a status of RUNNING and you don't resume it within 30 days of pausing it, - // the job or job run will expire and be cancelled, depending on the job's type. To - // check the expiration date, refer to the UserPausedDetails.jobExpiresAt property. + // * + // USER_PAUSED - You paused the job. If you paused the job while it had a status of + // RUNNING and you don't resume it within 30 days of pausing it, the job or job run + // will expire and be cancelled, depending on the job's type. To check the + // expiration date, refer to the UserPausedDetails.jobExpiresAt property. JobStatus types.JobStatus // The schedule for running the job. Possible values are: @@ -112,6 +117,35 @@ type DescribeClassificationJobOutput struct { // started. LastRunTime *time.Time + // An array of unique identifiers, one for each managed data identifier that the + // job is explicitly configured to include (use) or exclude (not use) when it + // analyzes data. Inclusion or exclusion depends on the managed data identifier + // selection type specified for the job (managedDataIdentifierSelector). This value + // is null if the job's managed data identifier selection type is ALL or the job + // uses only custom data identifiers (customDataIdentifierIds) to analyze data. + ManagedDataIdentifierIds []string + + // The selection type that determines which managed data identifiers the job uses + // to analyze data. Possible values are: + // + // * ALL - Use all the managed data + // identifiers that Amazon Macie provides. + // + // * EXCLUDE - Use all the managed data + // identifiers that Macie provides except the managed data identifiers specified by + // the managedDataIdentifierIds property. + // + // * INCLUDE - Use only the managed data + // identifiers specified by the managedDataIdentifierIds property. + // + // * NONE - Don't + // use any managed data identifiers. + // + // If this value is null, the job uses all + // managed data identifiers. If this value is null, ALL, or EXCLUDE for a recurring + // job, the job also uses new managed data identifiers as they are released. + ManagedDataIdentifierSelector types.ManagedDataIdentifierSelector + // The custom name of the job. Name *string @@ -123,8 +157,8 @@ type DescribeClassificationJobOutput struct { // objects that the job analyzes. SamplingPercentage int32 - // The recurrence pattern for running the job. If the job is configured to run only - // once, this value is null. + // The recurrence pattern for running the job. This value is null if the job is + // configured to run only once. ScheduleFrequency *types.JobScheduleFrequency // The number of times that the job has run and processing statistics for the job's diff --git a/service/macie2/api_op_GetCustomDataIdentifier.go b/service/macie2/api_op_GetCustomDataIdentifier.go index 704b20ecef4..b31043bc063 100644 --- a/service/macie2/api_op_GetCustomDataIdentifier.go +++ b/service/macie2/api_op_GetCustomDataIdentifier.go @@ -69,9 +69,9 @@ type GetCustomDataIdentifierOutput struct { Keywords []string // The maximum number of characters that can exist between text that matches the - // regex pattern and the character sequences specified by the keywords array. Macie - // includes or excludes a result based on the proximity of a keyword to text that - // matches the regex pattern. + // regex pattern and the character sequences specified by the keywords array. 
+ // Amazon Macie includes or excludes a result based on the proximity of a keyword + // to text that matches the regex pattern. MaximumMatchDistance int32 // The custom name of the custom data identifier. diff --git a/service/macie2/api_op_ListInvitations.go b/service/macie2/api_op_ListInvitations.go index 67b52a798a5..052f5a18120 100644 --- a/service/macie2/api_op_ListInvitations.go +++ b/service/macie2/api_op_ListInvitations.go @@ -12,8 +12,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Retrieves information about all the Amazon Macie membership invitations that -// were received by an account. +// Retrieves information about the Amazon Macie membership invitations that were +// received by an account. func (c *Client) ListInvitations(ctx context.Context, params *ListInvitationsInput, optFns ...func(*Options)) (*ListInvitationsOutput, error) { if params == nil { params = &ListInvitationsInput{} diff --git a/service/macie2/api_op_ListManagedDataIdentifiers.go b/service/macie2/api_op_ListManagedDataIdentifiers.go new file mode 100644 index 00000000000..8b8dc2d9bd0 --- /dev/null +++ b/service/macie2/api_op_ListManagedDataIdentifiers.go @@ -0,0 +1,122 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package macie2 + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/macie2/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves information about all the managed data identifiers that Amazon Macie +// currently provides. +func (c *Client) ListManagedDataIdentifiers(ctx context.Context, params *ListManagedDataIdentifiersInput, optFns ...func(*Options)) (*ListManagedDataIdentifiersOutput, error) { + if params == nil { + params = &ListManagedDataIdentifiersInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListManagedDataIdentifiers", params, optFns, c.addOperationListManagedDataIdentifiersMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListManagedDataIdentifiersOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListManagedDataIdentifiersInput struct { + + // The nextToken string that specifies which page of results to return in a + // paginated response. + NextToken *string + + noSmithyDocumentSerde +} + +type ListManagedDataIdentifiersOutput struct { + + // An array of objects, one for each managed data identifier. + Items []types.ManagedDataIdentifierSummary + + // The string to use in a subsequent request to get the next page of results in a + // paginated response. This value is null if there are no additional pages. + NextToken *string + + // Metadata pertaining to the operation's result. 
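A minimal sketch, separate from the generated code, of paging through the new ListManagedDataIdentifiers operation. This revision exposes only a NextToken cursor (no generated paginator), so the loop keeps calling until the token comes back nil; client setup via config.LoadDefaultConfig is the usual SDK bootstrapping, not something defined in this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/macie2"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := macie2.NewFromConfig(cfg)

	var nextToken *string
	for {
		out, err := client.ListManagedDataIdentifiers(ctx, &macie2.ListManagedDataIdentifiersInput{
			NextToken: nextToken,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, item := range out.Items {
			// Each summary carries the identifier ID (e.g. CREDIT_CARD_NUMBER)
			// and its sensitive data category.
			fmt.Printf("%s\t%s\n", aws.ToString(item.Id), item.Category)
		}
		if out.NextToken == nil {
			break // no more pages
		}
		nextToken = out.NextToken
	}
}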
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListManagedDataIdentifiersMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListManagedDataIdentifiers{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListManagedDataIdentifiers{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListManagedDataIdentifiers(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opListManagedDataIdentifiers(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "macie2", + OperationName: "ListManagedDataIdentifiers", + } +} diff --git a/service/macie2/api_op_TestCustomDataIdentifier.go b/service/macie2/api_op_TestCustomDataIdentifier.go index 61ef45daf9e..e3b0f7eea4b 100644 --- a/service/macie2/api_op_TestCustomDataIdentifier.go +++ b/service/macie2/api_op_TestCustomDataIdentifier.go @@ -43,21 +43,21 @@ type TestCustomDataIdentifierInput struct { // An array that lists specific character sequences (ignore words) to exclude from // the results. If the text matched by the regular expression is the same as any // string in this array, Amazon Macie ignores it. The array can contain as many as - // 10 ignore words. Each ignore word can contain 4-90 characters. Ignore words are - // case sensitive. + // 10 ignore words. Each ignore word can contain 4-90 UTF-8 characters. Ignore + // words are case sensitive. IgnoreWords []string // An array that lists specific character sequences (keywords), one of which must // be within proximity (maximumMatchDistance) of the regular expression to match. // The array can contain as many as 50 keywords. Each keyword can contain 3-90 - // characters. Keywords aren't case sensitive. + // UTF-8 characters. Keywords aren't case sensitive. Keywords []string // The maximum number of characters that can exist between text that matches the - // regex pattern and the character sequences specified by the keywords array. 
Macie - // includes or excludes a result based on the proximity of a keyword to text that - // matches the regex pattern. The distance can be 1-300 characters. The default - // value is 50. + // regex pattern and the character sequences specified by the keywords array. + // Amazon Macie includes or excludes a result based on the proximity of a keyword + // to text that matches the regex pattern. The distance can be 1-300 characters. + // The default value is 50. MaximumMatchDistance int32 noSmithyDocumentSerde diff --git a/service/macie2/deserializers.go b/service/macie2/deserializers.go index b926d0f2a01..6dae85d7d9b 100644 --- a/service/macie2/deserializers.go +++ b/service/macie2/deserializers.go @@ -2308,6 +2308,20 @@ func awsRestjson1_deserializeOpDocumentDescribeClassificationJobOutput(v **Descr sv.LastRunTime = ptr.Time(t) } + case "managedDataIdentifierIds": + if err := awsRestjson1_deserializeDocument__listOf__string(&sv.ManagedDataIdentifierIds, value); err != nil { + return err + } + + case "managedDataIdentifierSelector": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ManagedDataIdentifierSelector to be of type string, got %T instead", value) + } + sv.ManagedDataIdentifierSelector = types.ManagedDataIdentifierSelector(jtv) + } + case "name": if value != nil { jtv, ok := value.(string) @@ -6861,6 +6875,159 @@ func awsRestjson1_deserializeOpDocumentListInvitationsOutput(v **ListInvitations return nil } +type awsRestjson1_deserializeOpListManagedDataIdentifiers struct { +} + +func (*awsRestjson1_deserializeOpListManagedDataIdentifiers) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListManagedDataIdentifiers) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListManagedDataIdentifiers(response, &metadata) + } + output := &ListManagedDataIdentifiersOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListManagedDataIdentifiersOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListManagedDataIdentifiers(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); 
err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListManagedDataIdentifiersOutput(v **ListManagedDataIdentifiersOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListManagedDataIdentifiersOutput + if *v == nil { + sv = &ListManagedDataIdentifiersOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "items": + if err := awsRestjson1_deserializeDocument__listOfManagedDataIdentifierSummary(&sv.Items, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + type awsRestjson1_deserializeOpListMembers struct { } @@ -9404,6 +9571,40 @@ func awsRestjson1_deserializeDocument__listOfKeyValuePair(v *[]types.KeyValuePai return nil } +func awsRestjson1_deserializeDocument__listOfManagedDataIdentifierSummary(v *[]types.ManagedDataIdentifierSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.ManagedDataIdentifierSummary + if *v == nil { + cv = []types.ManagedDataIdentifierSummary{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.ManagedDataIdentifierSummary + destAddr := &col + if err := awsRestjson1_deserializeDocumentManagedDataIdentifierSummary(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsRestjson1_deserializeDocument__listOfMatchingResource(v *[]types.MatchingResource, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -13450,6 +13651,55 @@ func awsRestjson1_deserializeDocumentLastRunErrorStatus(v **types.LastRunErrorSt return nil } +func awsRestjson1_deserializeDocumentManagedDataIdentifierSummary(v **types.ManagedDataIdentifierSummary, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type 
%T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ManagedDataIdentifierSummary + if *v == nil { + sv = &types.ManagedDataIdentifierSummary{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "category": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SensitiveDataItemCategory to be of type string, got %T instead", value) + } + sv.Category = types.SensitiveDataItemCategory(jtv) + } + + case "id": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Id = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentMatchingBucket(v **types.MatchingBucket, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/macie2/generated.json b/service/macie2/generated.json index c88b52f8b83..1a19233dd43 100644 --- a/service/macie2/generated.json +++ b/service/macie2/generated.json @@ -47,6 +47,7 @@ "api_op_ListFindings.go", "api_op_ListFindingsFilters.go", "api_op_ListInvitations.go", + "api_op_ListManagedDataIdentifiers.go", "api_op_ListMembers.go", "api_op_ListOrganizationAdminAccounts.go", "api_op_ListTagsForResource.go", diff --git a/service/macie2/serializers.go b/service/macie2/serializers.go index 355693cabf9..8264545a8be 100644 --- a/service/macie2/serializers.go +++ b/service/macie2/serializers.go @@ -249,6 +249,18 @@ func awsRestjson1_serializeOpDocumentCreateClassificationJobInput(v *CreateClass ok.String(string(v.JobType)) } + if v.ManagedDataIdentifierIds != nil { + ok := object.Key("managedDataIdentifierIds") + if err := awsRestjson1_serializeDocument__listOf__string(v.ManagedDataIdentifierIds, ok); err != nil { + return err + } + } + + if len(v.ManagedDataIdentifierSelector) > 0 { + ok := object.Key("managedDataIdentifierSelector") + ok.String(string(v.ManagedDataIdentifierSelector)) + } + if v.Name != nil { ok := object.Key("name") ok.String(*v.Name) @@ -2804,6 +2816,74 @@ func awsRestjson1_serializeOpHttpBindingsListInvitationsInput(v *ListInvitations return nil } +type awsRestjson1_serializeOpListManagedDataIdentifiers struct { +} + +func (*awsRestjson1_serializeOpListManagedDataIdentifiers) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListManagedDataIdentifiers) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListManagedDataIdentifiersInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/managed-data-identifiers/list") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentListManagedDataIdentifiersInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListManagedDataIdentifiersInput(v *ListManagedDataIdentifiersInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentListManagedDataIdentifiersInput(v *ListManagedDataIdentifiersInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.NextToken != nil { + ok := object.Key("nextToken") + ok.String(*v.NextToken) + } + + return nil +} + type awsRestjson1_serializeOpListMembers struct { } diff --git a/service/macie2/types/enums.go b/service/macie2/types/enums.go index 562f1b038f2..1809a99740c 100644 --- a/service/macie2/types/enums.go +++ b/service/macie2/types/enums.go @@ -486,6 +486,29 @@ func (MacieStatus) Values() []MacieStatus { } } +type ManagedDataIdentifierSelector string + +// Enum values for ManagedDataIdentifierSelector +const ( + ManagedDataIdentifierSelectorAll ManagedDataIdentifierSelector = "ALL" + ManagedDataIdentifierSelectorExclude ManagedDataIdentifierSelector = "EXCLUDE" + ManagedDataIdentifierSelectorInclude ManagedDataIdentifierSelector = "INCLUDE" + ManagedDataIdentifierSelectorNone ManagedDataIdentifierSelector = "NONE" +) + +// Values returns all known values for ManagedDataIdentifierSelector. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. +func (ManagedDataIdentifierSelector) Values() []ManagedDataIdentifierSelector { + return []ManagedDataIdentifierSelector{ + "ALL", + "EXCLUDE", + "INCLUDE", + "NONE", + } +} + type OrderBy string // Enum values for OrderBy diff --git a/service/macie2/types/types.go b/service/macie2/types/types.go index 2b23ffab256..ea73fd74c60 100644 --- a/service/macie2/types/types.go +++ b/service/macie2/types/types.go @@ -826,8 +826,8 @@ type DailySchedule struct { noSmithyDocumentSerde } -// Provides information about a type of sensitive data that was detected by managed -// data identifiers and produced a sensitive data finding. +// Provides information about a type of sensitive data that was detected by a +// managed data identifier and produced a sensitive data finding. type DefaultDetection struct { // The total number of occurrences of the type of sensitive data that was detected. @@ -1315,18 +1315,18 @@ type JobSummary struct { // the next scheduled run is pending. This value doesn't apply to one-time jobs. // // * - // PAUSED - Amazon Macie started running the job but additional processing would - // exceed the monthly sensitive data discovery quota for your account or one or - // more member accounts that the job analyzes data for. - // - // * RUNNING - For a one-time - // job, the job is in progress. 
For a recurring job, a scheduled run is in - // progress. - // - // * USER_PAUSED - You paused the job. If you paused the job while it - // had a status of RUNNING and you don't resume it within 30 days of pausing it, - // the job or job run will expire and be cancelled, depending on the job's type. To - // check the expiration date, refer to the UserPausedDetails.jobExpiresAt property. + // PAUSED - Macie started running the job but additional processing would exceed + // the monthly sensitive data discovery quota for your account or one or more + // member accounts that the job analyzes data for. + // + // * RUNNING - For a one-time job, + // the job is in progress. For a recurring job, a scheduled run is in progress. + // + // * + // USER_PAUSED - You paused the job. If you paused the job while it had a status of + // RUNNING and you don't resume it within 30 days of pausing it, the job or job run + // will expire and be cancelled, depending on the job's type. To check the + // expiration date, refer to the UserPausedDetails.jobExpiresAt property. JobStatus JobStatus // The schedule for running the job. Possible values are: @@ -1437,6 +1437,29 @@ type ListJobsSortCriteria struct { noSmithyDocumentSerde } +// Provides information about a managed data identifier. For additional +// information, see Using managed data identifiers +// (https://docs.aws.amazon.com/macie/latest/user/managed-data-identifiers.html) in +// the Amazon Macie User Guide. +type ManagedDataIdentifierSummary struct { + + // The category of sensitive data that the managed data identifier detects: + // CREDENTIALS, for credentials data such as private keys or Amazon Web Services + // secret keys; FINANCIAL_INFORMATION, for financial data such as credit card + // numbers; or, PERSONAL_INFORMATION, for personal health information, such as + // health insurance identification numbers, or personally identifiable information, + // such as passport numbers. + Category SensitiveDataItemCategory + + // The unique identifier for the managed data identifier. This is a string that + // describes the type of sensitive data that the managed data identifier detects. + // For example: OPENSSH_PRIVATE_KEY for OpenSSH private keys, CREDIT_CARD_NUMBER + // for credit card numbers, or USA_PASSPORT_NUMBER for US passport numbers. + Id *string + + noSmithyDocumentSerde +} + // Provides statistical data and other information about an S3 bucket that Amazon // Macie monitors and analyzes. type MatchingBucket struct { @@ -1621,8 +1644,8 @@ type ObjectLevelStatistics struct { } // Specifies the location of 1-15 occurrences of sensitive data that was detected -// by managed data identifiers or a custom data identifier and produced a sensitive -// data finding. +// by a managed data identifier or a custom data identifier and produced a +// sensitive data finding. type Occurrences struct { // An array of objects, one for each occurrence of sensitive data in a Microsoft @@ -1805,8 +1828,8 @@ type S3Bucket struct { // The name of the bucket. Name *string - // The display name and Amazon Web Services account ID for the user who owns the - // bucket. + // The display name and canonical user ID for the Amazon Web Services account that + // owns the bucket. Owner *S3BucketOwner // The permissions settings that determine whether the bucket is publicly @@ -1853,13 +1876,14 @@ type S3BucketDefinitionForJob struct { noSmithyDocumentSerde } -// Provides information about the user who owns an S3 bucket. 
+// Provides information about the Amazon Web Services account that owns an S3 +// bucket. type S3BucketOwner struct { - // The display name of the user who owns the bucket. + // The display name of the account that owns the bucket. DisplayName *string - // The Amazon Web Services account ID for the user who owns the bucket. + // The canonical user ID for the account that owns the bucket. Id *string noSmithyDocumentSerde @@ -2140,8 +2164,8 @@ type SensitiveDataItem struct { // credentials data such as private keys or Amazon Web Services secret keys; // FINANCIAL_INFORMATION, for financial data such as credit card numbers; or, // PERSONAL_INFORMATION, for personal health information, such as health insurance - // identification numbers, or personally identifiable information, such as driver's - // license identification numbers. + // identification numbers, or personally identifiable information, such as passport + // numbers. Category SensitiveDataItemCategory // An array of objects, one for each type of sensitive data that was detected. Each diff --git a/service/mediapackagevod/internal/endpoints/endpoints.go b/service/mediapackagevod/internal/endpoints/endpoints.go index 4bb004f5300..a5482c0bffa 100644 --- a/service/mediapackagevod/internal/endpoints/endpoints.go +++ b/service/mediapackagevod/internal/endpoints/endpoints.go @@ -62,6 +62,23 @@ var defaultPartitions = endpoints.Partitions{ }, RegionRegex: partitionRegexp.Aws, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + "ap-northeast-1": endpoints.Endpoint{}, + "ap-northeast-2": endpoints.Endpoint{}, + "ap-south-1": endpoints.Endpoint{}, + "ap-southeast-1": endpoints.Endpoint{}, + "ap-southeast-2": endpoints.Endpoint{}, + "eu-central-1": endpoints.Endpoint{}, + "eu-north-1": endpoints.Endpoint{}, + "eu-west-1": endpoints.Endpoint{}, + "eu-west-2": endpoints.Endpoint{}, + "eu-west-3": endpoints.Endpoint{}, + "sa-east-1": endpoints.Endpoint{}, + "us-east-1": endpoints.Endpoint{}, + "us-east-2": endpoints.Endpoint{}, + "us-west-1": endpoints.Endpoint{}, + "us-west-2": endpoints.Endpoint{}, + }, }, { ID: "aws-cn", diff --git a/service/networkfirewall/internal/endpoints/endpoints.go b/service/networkfirewall/internal/endpoints/endpoints.go index 8dd0a8c4b57..f79a760616f 100644 --- a/service/networkfirewall/internal/endpoints/endpoints.go +++ b/service/networkfirewall/internal/endpoints/endpoints.go @@ -62,6 +62,59 @@ var defaultPartitions = endpoints.Partitions{ }, RegionRegex: partitionRegexp.Aws, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + "af-south-1": endpoints.Endpoint{}, + "ap-east-1": endpoints.Endpoint{}, + "ap-northeast-1": endpoints.Endpoint{}, + "ap-northeast-2": endpoints.Endpoint{}, + "ap-northeast-3": endpoints.Endpoint{}, + "ap-south-1": endpoints.Endpoint{}, + "ap-southeast-1": endpoints.Endpoint{}, + "ap-southeast-2": endpoints.Endpoint{}, + "ca-central-1": endpoints.Endpoint{}, + "eu-central-1": endpoints.Endpoint{}, + "eu-north-1": endpoints.Endpoint{}, + "eu-south-1": endpoints.Endpoint{}, + "eu-west-1": endpoints.Endpoint{}, + "eu-west-2": endpoints.Endpoint{}, + "eu-west-3": endpoints.Endpoint{}, + "fips-ca-central-1": endpoints.Endpoint{ + Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoints.Endpoint{ + Hostname: "network-firewall-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": 
endpoints.Endpoint{ + Hostname: "network-firewall-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoints.Endpoint{ + Hostname: "network-firewall-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoints.Endpoint{ + Hostname: "network-firewall-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoints.Endpoint{}, + "sa-east-1": endpoints.Endpoint{}, + "us-east-1": endpoints.Endpoint{}, + "us-east-2": endpoints.Endpoint{}, + "us-west-1": endpoints.Endpoint{}, + "us-west-2": endpoints.Endpoint{}, + }, }, { ID: "aws-cn", @@ -102,5 +155,21 @@ var defaultPartitions = endpoints.Partitions{ }, RegionRegex: partitionRegexp.AwsUsGov, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + "fips-us-gov-east-1": endpoints.Endpoint{ + Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoints.Endpoint{ + Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoints.Endpoint{}, + "us-gov-west-1": endpoints.Endpoint{}, + }, }, } diff --git a/service/pinpoint/api_op_CreateInAppTemplate.go b/service/pinpoint/api_op_CreateInAppTemplate.go new file mode 100644 index 00000000000..2ca9be519e7 --- /dev/null +++ b/service/pinpoint/api_op_CreateInAppTemplate.go @@ -0,0 +1,131 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package pinpoint + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/pinpoint/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a new message template for messages using the in-app message channel. +func (c *Client) CreateInAppTemplate(ctx context.Context, params *CreateInAppTemplateInput, optFns ...func(*Options)) (*CreateInAppTemplateOutput, error) { + if params == nil { + params = &CreateInAppTemplateInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateInAppTemplate", params, optFns, c.addOperationCreateInAppTemplateMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateInAppTemplateOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateInAppTemplateInput struct { + + // InApp Template Request. + // + // This member is required. + InAppTemplateRequest *types.InAppTemplateRequest + + // The name of the message template. A template name must start with an + // alphanumeric character and can contain a maximum of 128 characters. The + // characters can be alphanumeric characters, underscores (_), or hyphens (-). + // Template names are case sensitive. + // + // This member is required. + TemplateName *string + + noSmithyDocumentSerde +} + +type CreateInAppTemplateOutput struct { + + // Provides information about a request to create a message template. + // + // This member is required. + TemplateCreateMessageBody *types.TemplateCreateMessageBody + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateInAppTemplateMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateInAppTemplate{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateInAppTemplate{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateInAppTemplateValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateInAppTemplate(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateInAppTemplate(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "mobiletargeting", + OperationName: "CreateInAppTemplate", + } +} diff --git a/service/pinpoint/api_op_DeleteInAppTemplate.go b/service/pinpoint/api_op_DeleteInAppTemplate.go new file mode 100644 index 00000000000..ef82ee621c7 --- /dev/null +++ b/service/pinpoint/api_op_DeleteInAppTemplate.go @@ -0,0 +1,147 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package pinpoint + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/pinpoint/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a message template for messages sent using the in-app message channel. +func (c *Client) DeleteInAppTemplate(ctx context.Context, params *DeleteInAppTemplateInput, optFns ...func(*Options)) (*DeleteInAppTemplateOutput, error) { + if params == nil { + params = &DeleteInAppTemplateInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteInAppTemplate", params, optFns, c.addOperationDeleteInAppTemplateMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteInAppTemplateOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteInAppTemplateInput struct { + + // The name of the message template. 
A template name must start with an + // alphanumeric character and can contain a maximum of 128 characters. The + // characters can be alphanumeric characters, underscores (_), or hyphens (-). + // Template names are case sensitive. + // + // This member is required. + TemplateName *string + + // The unique identifier for the version of the message template to update, + // retrieve information about, or delete. To retrieve identifiers and other + // information for all the versions of a template, use the Template Versions + // resource. If specified, this value must match the identifier for an existing + // template version. If specified for an update operation, this value must match + // the identifier for the latest existing version of the template. This restriction + // helps ensure that race conditions don't occur. If you don't specify a value for + // this parameter, Amazon Pinpoint does the following: + // + // * For a get operation, + // retrieves information about the active version of the template. + // + // * For an update + // operation, saves the updates to (overwrites) the latest existing version of the + // template, if the create-new-version parameter isn't used or is set to false. + // + // * + // For a delete operation, deletes the template, including all versions of the + // template. + Version *string + + noSmithyDocumentSerde +} + +type DeleteInAppTemplateOutput struct { + + // Provides information about an API request or response. + // + // This member is required. + MessageBody *types.MessageBody + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteInAppTemplateMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpDeleteInAppTemplate{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpDeleteInAppTemplate{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteInAppTemplateValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteInAppTemplate(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return 
err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteInAppTemplate(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "mobiletargeting", + OperationName: "DeleteInAppTemplate", + } +} diff --git a/service/pinpoint/api_op_GetInAppMessages.go b/service/pinpoint/api_op_GetInAppMessages.go new file mode 100644 index 00000000000..cad79baa96b --- /dev/null +++ b/service/pinpoint/api_op_GetInAppMessages.go @@ -0,0 +1,129 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package pinpoint + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/pinpoint/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the in-app messages targeted for the provided endpoint ID. +func (c *Client) GetInAppMessages(ctx context.Context, params *GetInAppMessagesInput, optFns ...func(*Options)) (*GetInAppMessagesOutput, error) { + if params == nil { + params = &GetInAppMessagesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetInAppMessages", params, optFns, c.addOperationGetInAppMessagesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetInAppMessagesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetInAppMessagesInput struct { + + // The unique identifier for the application. This identifier is displayed as the + // Project ID on the Amazon Pinpoint console. + // + // This member is required. + ApplicationId *string + + // The unique identifier for the endpoint. + // + // This member is required. + EndpointId *string + + noSmithyDocumentSerde +} + +type GetInAppMessagesOutput struct { + + // Get in-app messages response object. + // + // This member is required. + InAppMessagesResponse *types.InAppMessagesResponse + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetInAppMessagesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetInAppMessages{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetInAppMessages{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetInAppMessagesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetInAppMessages(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetInAppMessages(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "mobiletargeting", + OperationName: "GetInAppMessages", + } +} diff --git a/service/pinpoint/api_op_GetInAppTemplate.go b/service/pinpoint/api_op_GetInAppTemplate.go new file mode 100644 index 00000000000..ba4e2162a87 --- /dev/null +++ b/service/pinpoint/api_op_GetInAppTemplate.go @@ -0,0 +1,148 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package pinpoint + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/pinpoint/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the content and settings of a message template for messages sent +// through the in-app channel. +func (c *Client) GetInAppTemplate(ctx context.Context, params *GetInAppTemplateInput, optFns ...func(*Options)) (*GetInAppTemplateOutput, error) { + if params == nil { + params = &GetInAppTemplateInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetInAppTemplate", params, optFns, c.addOperationGetInAppTemplateMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetInAppTemplateOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetInAppTemplateInput struct { + + // The name of the message template. 
A template name must start with an + // alphanumeric character and can contain a maximum of 128 characters. The + // characters can be alphanumeric characters, underscores (_), or hyphens (-). + // Template names are case sensitive. + // + // This member is required. + TemplateName *string + + // The unique identifier for the version of the message template to update, + // retrieve information about, or delete. To retrieve identifiers and other + // information for all the versions of a template, use the Template Versions + // resource. If specified, this value must match the identifier for an existing + // template version. If specified for an update operation, this value must match + // the identifier for the latest existing version of the template. This restriction + // helps ensure that race conditions don't occur. If you don't specify a value for + // this parameter, Amazon Pinpoint does the following: + // + // * For a get operation, + // retrieves information about the active version of the template. + // + // * For an update + // operation, saves the updates to (overwrites) the latest existing version of the + // template, if the create-new-version parameter isn't used or is set to false. + // + // * + // For a delete operation, deletes the template, including all versions of the + // template. + Version *string + + noSmithyDocumentSerde +} + +type GetInAppTemplateOutput struct { + + // In-App Template Response. + // + // This member is required. + InAppTemplateResponse *types.InAppTemplateResponse + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetInAppTemplateMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetInAppTemplate{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetInAppTemplate{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetInAppTemplateValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetInAppTemplate(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + 
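As a quick orientation for reviewers, the new in-app operations added in this revision (GetInAppTemplate, GetInAppMessages, DeleteInAppTemplate, UpdateInAppTemplate) are called like any other Pinpoint v2 client operation. The sketch below is illustrative only and is not part of the generated diff; it assumes default shared configuration via config.LoadDefaultConfig, and the template name, application ID, and endpoint ID it uses are hypothetical placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/pinpoint"
)

func main() {
	ctx := context.Background()

	// Resolve region and credentials from the environment and build a Pinpoint client.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := pinpoint.NewFromConfig(cfg)

	// Retrieve the active version of an in-app message template
	// ("my-inapp-template" is a hypothetical template name).
	getOut, err := client.GetInAppTemplate(ctx, &pinpoint.GetInAppTemplateInput{
		TemplateName: aws.String("my-inapp-template"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("template: %+v\n", getOut.InAppTemplateResponse)

	// Fetch the in-app messages targeted at a specific endpoint
	// (application and endpoint IDs are hypothetical).
	msgOut, err := client.GetInAppMessages(ctx, &pinpoint.GetInAppMessagesInput{
		ApplicationId: aws.String("my-application-id"),
		EndpointId:    aws.String("my-endpoint-id"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("in-app messages: %+v\n", msgOut.InAppMessagesResponse)

	// Delete the template, including all of its versions, once it is no longer needed.
	if _, err := client.DeleteInAppTemplate(ctx, &pinpoint.DeleteInAppTemplateInput{
		TemplateName: aws.String("my-inapp-template"),
	}); err != nil {
		log.Fatal(err)
	}
}

UpdateInAppTemplate follows the same calling shape, taking a types.InAppTemplateRequest together with the optional CreateNewVersion and Version fields documented above.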
+func newServiceMetadataMiddleware_opGetInAppTemplate(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "mobiletargeting", + OperationName: "GetInAppTemplate", + } +} diff --git a/service/pinpoint/api_op_UpdateInAppTemplate.go b/service/pinpoint/api_op_UpdateInAppTemplate.go new file mode 100644 index 00000000000..d0b50ef4085 --- /dev/null +++ b/service/pinpoint/api_op_UpdateInAppTemplate.go @@ -0,0 +1,162 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package pinpoint + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/pinpoint/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates an existing message template for messages sent through the in-app +// message channel. +func (c *Client) UpdateInAppTemplate(ctx context.Context, params *UpdateInAppTemplateInput, optFns ...func(*Options)) (*UpdateInAppTemplateOutput, error) { + if params == nil { + params = &UpdateInAppTemplateInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateInAppTemplate", params, optFns, c.addOperationUpdateInAppTemplateMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateInAppTemplateOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateInAppTemplateInput struct { + + // InApp Template Request. + // + // This member is required. + InAppTemplateRequest *types.InAppTemplateRequest + + // The name of the message template. A template name must start with an + // alphanumeric character and can contain a maximum of 128 characters. The + // characters can be alphanumeric characters, underscores (_), or hyphens (-). + // Template names are case sensitive. + // + // This member is required. + TemplateName *string + + // Specifies whether to save the updates as a new version of the message template. + // Valid values are: true, save the updates as a new version; and, false, save the + // updates to (overwrite) the latest existing version of the template. If you don't + // specify a value for this parameter, Amazon Pinpoint saves the updates to + // (overwrites) the latest existing version of the template. If you specify a value + // of true for this parameter, don't specify a value for the version parameter. + // Otherwise, an error will occur. + CreateNewVersion bool + + // The unique identifier for the version of the message template to update, + // retrieve information about, or delete. To retrieve identifiers and other + // information for all the versions of a template, use the Template Versions + // resource. If specified, this value must match the identifier for an existing + // template version. If specified for an update operation, this value must match + // the identifier for the latest existing version of the template. This restriction + // helps ensure that race conditions don't occur. If you don't specify a value for + // this parameter, Amazon Pinpoint does the following: + // + // * For a get operation, + // retrieves information about the active version of the template. + // + // * For an update + // operation, saves the updates to (overwrites) the latest existing version of the + // template, if the create-new-version parameter isn't used or is set to false. 
+ // + // * + // For a delete operation, deletes the template, including all versions of the + // template. + Version *string + + noSmithyDocumentSerde +} + +type UpdateInAppTemplateOutput struct { + + // Provides information about an API request or response. + // + // This member is required. + MessageBody *types.MessageBody + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateInAppTemplateMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpUpdateInAppTemplate{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpUpdateInAppTemplate{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateInAppTemplateValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateInAppTemplate(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateInAppTemplate(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "mobiletargeting", + OperationName: "UpdateInAppTemplate", + } +} diff --git a/service/pinpoint/deserializers.go b/service/pinpoint/deserializers.go index 1eb7fa3f004..e2424669280 100644 --- a/service/pinpoint/deserializers.go +++ b/service/pinpoint/deserializers.go @@ -840,6 +840,165 @@ func awsRestjson1_deserializeOpDocumentCreateImportJobOutput(v **CreateImportJob return nil } +type awsRestjson1_deserializeOpCreateInAppTemplate struct { +} + +func (*awsRestjson1_deserializeOpCreateInAppTemplate) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCreateInAppTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + 
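	// Reviewer note (not present in the generated source): every REST-JSON
	// deserializer added in this revision follows the same pattern seen here.
	// It verifies that the raw response is an *smithyhttp.Response, routes
	// non-2xx status codes to the operation-specific error deserializer, and
	// otherwise decodes the JSON body into the typed output shape. The body is
	// tee'd through a smithyio ring buffer so that decode failures can attach
	// a snapshot of the raw payload to the returned smithy.DeserializationError.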
if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCreateInAppTemplate(response, &metadata) + } + output := &CreateInAppTemplateOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeDocumentTemplateCreateMessageBody(&output.TemplateCreateMessageBody, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorCreateInAppTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("MethodNotAllowedException", errorCode): + return awsRestjson1_deserializeErrorMethodNotAllowedException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentCreateInAppTemplateOutput(v **CreateInAppTemplateOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + 
return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateInAppTemplateOutput + if *v == nil { + sv = &CreateInAppTemplateOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "TemplateCreateMessageBody": + if err := awsRestjson1_deserializeDocumentTemplateCreateMessageBody(&sv.TemplateCreateMessageBody, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + type awsRestjson1_deserializeOpCreateJourney struct { } @@ -3957,14 +4116,14 @@ func awsRestjson1_deserializeOpDocumentDeleteGcmChannelOutput(v **DeleteGcmChann return nil } -type awsRestjson1_deserializeOpDeleteJourney struct { +type awsRestjson1_deserializeOpDeleteInAppTemplate struct { } -func (*awsRestjson1_deserializeOpDeleteJourney) ID() string { +func (*awsRestjson1_deserializeOpDeleteInAppTemplate) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpDeleteJourney) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpDeleteInAppTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3978,9 +4137,9 @@ func (m *awsRestjson1_deserializeOpDeleteJourney) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorDeleteJourney(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorDeleteInAppTemplate(response, &metadata) } - output := &DeleteJourneyOutput{} + output := &DeleteInAppTemplateOutput{} out.Result = output var buff [1024]byte @@ -4001,7 +4160,7 @@ func (m *awsRestjson1_deserializeOpDeleteJourney) HandleDeserialize(ctx context. return out, metadata, err } - err = awsRestjson1_deserializeDocumentJourneyResponse(&output.JourneyResponse, shape) + err = awsRestjson1_deserializeDocumentMessageBody(&output.MessageBody, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4014,7 +4173,7 @@ func (m *awsRestjson1_deserializeOpDeleteJourney) HandleDeserialize(ctx context. 
return out, metadata, err } -func awsRestjson1_deserializeOpErrorDeleteJourney(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorDeleteInAppTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4086,7 +4245,7 @@ func awsRestjson1_deserializeOpErrorDeleteJourney(response *smithyhttp.Response, } } -func awsRestjson1_deserializeOpDocumentDeleteJourneyOutput(v **DeleteJourneyOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentDeleteInAppTemplateOutput(v **DeleteInAppTemplateOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -4099,17 +4258,17 @@ func awsRestjson1_deserializeOpDocumentDeleteJourneyOutput(v **DeleteJourneyOutp return fmt.Errorf("unexpected JSON type %v", value) } - var sv *DeleteJourneyOutput + var sv *DeleteInAppTemplateOutput if *v == nil { - sv = &DeleteJourneyOutput{} + sv = &DeleteInAppTemplateOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "JourneyResponse": - if err := awsRestjson1_deserializeDocumentJourneyResponse(&sv.JourneyResponse, value); err != nil { + case "MessageBody": + if err := awsRestjson1_deserializeDocumentMessageBody(&sv.MessageBody, value); err != nil { return err } @@ -4122,14 +4281,14 @@ func awsRestjson1_deserializeOpDocumentDeleteJourneyOutput(v **DeleteJourneyOutp return nil } -type awsRestjson1_deserializeOpDeletePushTemplate struct { +type awsRestjson1_deserializeOpDeleteJourney struct { } -func (*awsRestjson1_deserializeOpDeletePushTemplate) ID() string { +func (*awsRestjson1_deserializeOpDeleteJourney) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpDeletePushTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpDeleteJourney) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4143,9 +4302,9 @@ func (m *awsRestjson1_deserializeOpDeletePushTemplate) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorDeletePushTemplate(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorDeleteJourney(response, &metadata) } - output := &DeletePushTemplateOutput{} + output := &DeleteJourneyOutput{} out.Result = output var buff [1024]byte @@ -4166,7 +4325,7 @@ func (m *awsRestjson1_deserializeOpDeletePushTemplate) HandleDeserialize(ctx con return out, metadata, err } - err = awsRestjson1_deserializeDocumentMessageBody(&output.MessageBody, shape) + err = awsRestjson1_deserializeDocumentJourneyResponse(&output.JourneyResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4179,7 +4338,7 @@ func (m *awsRestjson1_deserializeOpDeletePushTemplate) HandleDeserialize(ctx con return out, metadata, err } -func awsRestjson1_deserializeOpErrorDeletePushTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorDeleteJourney(response *smithyhttp.Response, metadata 
*middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4251,7 +4410,7 @@ func awsRestjson1_deserializeOpErrorDeletePushTemplate(response *smithyhttp.Resp } } -func awsRestjson1_deserializeOpDocumentDeletePushTemplateOutput(v **DeletePushTemplateOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentDeleteJourneyOutput(v **DeleteJourneyOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -4264,17 +4423,17 @@ func awsRestjson1_deserializeOpDocumentDeletePushTemplateOutput(v **DeletePushTe return fmt.Errorf("unexpected JSON type %v", value) } - var sv *DeletePushTemplateOutput + var sv *DeleteJourneyOutput if *v == nil { - sv = &DeletePushTemplateOutput{} + sv = &DeleteJourneyOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "MessageBody": - if err := awsRestjson1_deserializeDocumentMessageBody(&sv.MessageBody, value); err != nil { + case "JourneyResponse": + if err := awsRestjson1_deserializeDocumentJourneyResponse(&sv.JourneyResponse, value); err != nil { return err } @@ -4287,14 +4446,14 @@ func awsRestjson1_deserializeOpDocumentDeletePushTemplateOutput(v **DeletePushTe return nil } -type awsRestjson1_deserializeOpDeleteRecommenderConfiguration struct { +type awsRestjson1_deserializeOpDeletePushTemplate struct { } -func (*awsRestjson1_deserializeOpDeleteRecommenderConfiguration) ID() string { +func (*awsRestjson1_deserializeOpDeletePushTemplate) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpDeleteRecommenderConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpDeletePushTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4308,9 +4467,9 @@ func (m *awsRestjson1_deserializeOpDeleteRecommenderConfiguration) HandleDeseria } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorDeleteRecommenderConfiguration(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorDeletePushTemplate(response, &metadata) } - output := &DeleteRecommenderConfigurationOutput{} + output := &DeletePushTemplateOutput{} out.Result = output var buff [1024]byte @@ -4331,7 +4490,7 @@ func (m *awsRestjson1_deserializeOpDeleteRecommenderConfiguration) HandleDeseria return out, metadata, err } - err = awsRestjson1_deserializeDocumentRecommenderConfigurationResponse(&output.RecommenderConfigurationResponse, shape) + err = awsRestjson1_deserializeDocumentMessageBody(&output.MessageBody, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4344,7 +4503,7 @@ func (m *awsRestjson1_deserializeOpDeleteRecommenderConfiguration) HandleDeseria return out, metadata, err } -func awsRestjson1_deserializeOpErrorDeleteRecommenderConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorDeletePushTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != 
nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4416,7 +4575,7 @@ func awsRestjson1_deserializeOpErrorDeleteRecommenderConfiguration(response *smi } } -func awsRestjson1_deserializeOpDocumentDeleteRecommenderConfigurationOutput(v **DeleteRecommenderConfigurationOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentDeletePushTemplateOutput(v **DeletePushTemplateOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -4429,17 +4588,17 @@ func awsRestjson1_deserializeOpDocumentDeleteRecommenderConfigurationOutput(v ** return fmt.Errorf("unexpected JSON type %v", value) } - var sv *DeleteRecommenderConfigurationOutput + var sv *DeletePushTemplateOutput if *v == nil { - sv = &DeleteRecommenderConfigurationOutput{} + sv = &DeletePushTemplateOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "RecommenderConfigurationResponse": - if err := awsRestjson1_deserializeDocumentRecommenderConfigurationResponse(&sv.RecommenderConfigurationResponse, value); err != nil { + case "MessageBody": + if err := awsRestjson1_deserializeDocumentMessageBody(&sv.MessageBody, value); err != nil { return err } @@ -4452,14 +4611,14 @@ func awsRestjson1_deserializeOpDocumentDeleteRecommenderConfigurationOutput(v ** return nil } -type awsRestjson1_deserializeOpDeleteSegment struct { +type awsRestjson1_deserializeOpDeleteRecommenderConfiguration struct { } -func (*awsRestjson1_deserializeOpDeleteSegment) ID() string { +func (*awsRestjson1_deserializeOpDeleteRecommenderConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpDeleteSegment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpDeleteRecommenderConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4473,9 +4632,9 @@ func (m *awsRestjson1_deserializeOpDeleteSegment) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorDeleteSegment(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorDeleteRecommenderConfiguration(response, &metadata) } - output := &DeleteSegmentOutput{} + output := &DeleteRecommenderConfigurationOutput{} out.Result = output var buff [1024]byte @@ -4496,7 +4655,7 @@ func (m *awsRestjson1_deserializeOpDeleteSegment) HandleDeserialize(ctx context. return out, metadata, err } - err = awsRestjson1_deserializeDocumentSegmentResponse(&output.SegmentResponse, shape) + err = awsRestjson1_deserializeDocumentRecommenderConfigurationResponse(&output.RecommenderConfigurationResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4509,7 +4668,7 @@ func (m *awsRestjson1_deserializeOpDeleteSegment) HandleDeserialize(ctx context. 
return out, metadata, err } -func awsRestjson1_deserializeOpErrorDeleteSegment(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorDeleteRecommenderConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4581,7 +4740,7 @@ func awsRestjson1_deserializeOpErrorDeleteSegment(response *smithyhttp.Response, } } -func awsRestjson1_deserializeOpDocumentDeleteSegmentOutput(v **DeleteSegmentOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentDeleteRecommenderConfigurationOutput(v **DeleteRecommenderConfigurationOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -4594,17 +4753,17 @@ func awsRestjson1_deserializeOpDocumentDeleteSegmentOutput(v **DeleteSegmentOutp return fmt.Errorf("unexpected JSON type %v", value) } - var sv *DeleteSegmentOutput + var sv *DeleteRecommenderConfigurationOutput if *v == nil { - sv = &DeleteSegmentOutput{} + sv = &DeleteRecommenderConfigurationOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "SegmentResponse": - if err := awsRestjson1_deserializeDocumentSegmentResponse(&sv.SegmentResponse, value); err != nil { + case "RecommenderConfigurationResponse": + if err := awsRestjson1_deserializeDocumentRecommenderConfigurationResponse(&sv.RecommenderConfigurationResponse, value); err != nil { return err } @@ -4617,14 +4776,14 @@ func awsRestjson1_deserializeOpDocumentDeleteSegmentOutput(v **DeleteSegmentOutp return nil } -type awsRestjson1_deserializeOpDeleteSmsChannel struct { +type awsRestjson1_deserializeOpDeleteSegment struct { } -func (*awsRestjson1_deserializeOpDeleteSmsChannel) ID() string { +func (*awsRestjson1_deserializeOpDeleteSegment) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpDeleteSmsChannel) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpDeleteSegment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4638,9 +4797,9 @@ func (m *awsRestjson1_deserializeOpDeleteSmsChannel) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorDeleteSmsChannel(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorDeleteSegment(response, &metadata) } - output := &DeleteSmsChannelOutput{} + output := &DeleteSegmentOutput{} out.Result = output var buff [1024]byte @@ -4661,7 +4820,7 @@ func (m *awsRestjson1_deserializeOpDeleteSmsChannel) HandleDeserialize(ctx conte return out, metadata, err } - err = awsRestjson1_deserializeDocumentSMSChannelResponse(&output.SMSChannelResponse, shape) + err = awsRestjson1_deserializeDocumentSegmentResponse(&output.SegmentResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4674,7 +4833,7 @@ func (m *awsRestjson1_deserializeOpDeleteSmsChannel) HandleDeserialize(ctx conte return out, metadata, err } -func awsRestjson1_deserializeOpErrorDeleteSmsChannel(response *smithyhttp.Response, metadata 
*middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorDeleteSegment(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4746,7 +4905,7 @@ func awsRestjson1_deserializeOpErrorDeleteSmsChannel(response *smithyhttp.Respon } } -func awsRestjson1_deserializeOpDocumentDeleteSmsChannelOutput(v **DeleteSmsChannelOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentDeleteSegmentOutput(v **DeleteSegmentOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -4759,17 +4918,182 @@ func awsRestjson1_deserializeOpDocumentDeleteSmsChannelOutput(v **DeleteSmsChann return fmt.Errorf("unexpected JSON type %v", value) } - var sv *DeleteSmsChannelOutput + var sv *DeleteSegmentOutput if *v == nil { - sv = &DeleteSmsChannelOutput{} + sv = &DeleteSegmentOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "SMSChannelResponse": - if err := awsRestjson1_deserializeDocumentSMSChannelResponse(&sv.SMSChannelResponse, value); err != nil { + case "SegmentResponse": + if err := awsRestjson1_deserializeDocumentSegmentResponse(&sv.SegmentResponse, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpDeleteSmsChannel struct { +} + +func (*awsRestjson1_deserializeOpDeleteSmsChannel) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpDeleteSmsChannel) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorDeleteSmsChannel(response, &metadata) + } + output := &DeleteSmsChannelOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeDocumentSMSChannelResponse(&output.SMSChannelResponse, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorDeleteSmsChannel(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: 
fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("MethodNotAllowedException", errorCode): + return awsRestjson1_deserializeErrorMethodNotAllowedException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("PayloadTooLargeException", errorCode): + return awsRestjson1_deserializeErrorPayloadTooLargeException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentDeleteSmsChannelOutput(v **DeleteSmsChannelOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteSmsChannelOutput + if *v == nil { + sv = &DeleteSmsChannelOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "SMSChannelResponse": + if err := awsRestjson1_deserializeDocumentSMSChannelResponse(&sv.SMSChannelResponse, value); err != nil { return err } @@ -9732,14 +10056,14 @@ func awsRestjson1_deserializeOpDocumentGetImportJobsOutput(v **GetImportJobsOutp return nil } -type awsRestjson1_deserializeOpGetJourney struct { +type awsRestjson1_deserializeOpGetInAppMessages struct { } -func (*awsRestjson1_deserializeOpGetJourney) ID() string { +func (*awsRestjson1_deserializeOpGetInAppMessages) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetJourney) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetInAppMessages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, 
metadata, err = next.HandleDeserialize(ctx, in) @@ -9753,9 +10077,9 @@ func (m *awsRestjson1_deserializeOpGetJourney) HandleDeserialize(ctx context.Con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetJourney(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetInAppMessages(response, &metadata) } - output := &GetJourneyOutput{} + output := &GetInAppMessagesOutput{} out.Result = output var buff [1024]byte @@ -9776,7 +10100,7 @@ func (m *awsRestjson1_deserializeOpGetJourney) HandleDeserialize(ctx context.Con return out, metadata, err } - err = awsRestjson1_deserializeDocumentJourneyResponse(&output.JourneyResponse, shape) + err = awsRestjson1_deserializeDocumentInAppMessagesResponse(&output.InAppMessagesResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9789,7 +10113,7 @@ func (m *awsRestjson1_deserializeOpGetJourney) HandleDeserialize(ctx context.Con return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetJourney(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetInAppMessages(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9861,7 +10185,7 @@ func awsRestjson1_deserializeOpErrorGetJourney(response *smithyhttp.Response, me } } -func awsRestjson1_deserializeOpDocumentGetJourneyOutput(v **GetJourneyOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetInAppMessagesOutput(v **GetInAppMessagesOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -9874,17 +10198,17 @@ func awsRestjson1_deserializeOpDocumentGetJourneyOutput(v **GetJourneyOutput, va return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetJourneyOutput + var sv *GetInAppMessagesOutput if *v == nil { - sv = &GetJourneyOutput{} + sv = &GetInAppMessagesOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "JourneyResponse": - if err := awsRestjson1_deserializeDocumentJourneyResponse(&sv.JourneyResponse, value); err != nil { + case "InAppMessagesResponse": + if err := awsRestjson1_deserializeDocumentInAppMessagesResponse(&sv.InAppMessagesResponse, value); err != nil { return err } @@ -9897,14 +10221,14 @@ func awsRestjson1_deserializeOpDocumentGetJourneyOutput(v **GetJourneyOutput, va return nil } -type awsRestjson1_deserializeOpGetJourneyDateRangeKpi struct { +type awsRestjson1_deserializeOpGetInAppTemplate struct { } -func (*awsRestjson1_deserializeOpGetJourneyDateRangeKpi) ID() string { +func (*awsRestjson1_deserializeOpGetInAppTemplate) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetJourneyDateRangeKpi) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetInAppTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9918,9 +10242,9 @@ func (m *awsRestjson1_deserializeOpGetJourneyDateRangeKpi) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 
{ - return out, metadata, awsRestjson1_deserializeOpErrorGetJourneyDateRangeKpi(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetInAppTemplate(response, &metadata) } - output := &GetJourneyDateRangeKpiOutput{} + output := &GetInAppTemplateOutput{} out.Result = output var buff [1024]byte @@ -9941,7 +10265,7 @@ func (m *awsRestjson1_deserializeOpGetJourneyDateRangeKpi) HandleDeserialize(ctx return out, metadata, err } - err = awsRestjson1_deserializeDocumentJourneyDateRangeKpiResponse(&output.JourneyDateRangeKpiResponse, shape) + err = awsRestjson1_deserializeDocumentInAppTemplateResponse(&output.InAppTemplateResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9954,7 +10278,7 @@ func (m *awsRestjson1_deserializeOpGetJourneyDateRangeKpi) HandleDeserialize(ctx return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetJourneyDateRangeKpi(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetInAppTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -10026,7 +10350,7 @@ func awsRestjson1_deserializeOpErrorGetJourneyDateRangeKpi(response *smithyhttp. } } -func awsRestjson1_deserializeOpDocumentGetJourneyDateRangeKpiOutput(v **GetJourneyDateRangeKpiOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetInAppTemplateOutput(v **GetInAppTemplateOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -10039,17 +10363,17 @@ func awsRestjson1_deserializeOpDocumentGetJourneyDateRangeKpiOutput(v **GetJourn return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetJourneyDateRangeKpiOutput + var sv *GetInAppTemplateOutput if *v == nil { - sv = &GetJourneyDateRangeKpiOutput{} + sv = &GetInAppTemplateOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "JourneyDateRangeKpiResponse": - if err := awsRestjson1_deserializeDocumentJourneyDateRangeKpiResponse(&sv.JourneyDateRangeKpiResponse, value); err != nil { + case "InAppTemplateResponse": + if err := awsRestjson1_deserializeDocumentInAppTemplateResponse(&sv.InAppTemplateResponse, value); err != nil { return err } @@ -10062,14 +10386,14 @@ func awsRestjson1_deserializeOpDocumentGetJourneyDateRangeKpiOutput(v **GetJourn return nil } -type awsRestjson1_deserializeOpGetJourneyExecutionActivityMetrics struct { +type awsRestjson1_deserializeOpGetJourney struct { } -func (*awsRestjson1_deserializeOpGetJourneyExecutionActivityMetrics) ID() string { +func (*awsRestjson1_deserializeOpGetJourney) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetJourneyExecutionActivityMetrics) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetJourney) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10083,9 +10407,9 @@ func (m *awsRestjson1_deserializeOpGetJourneyExecutionActivityMetrics) HandleDes } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsRestjson1_deserializeOpErrorGetJourneyExecutionActivityMetrics(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetJourney(response, &metadata) } - output := &GetJourneyExecutionActivityMetricsOutput{} + output := &GetJourneyOutput{} out.Result = output var buff [1024]byte @@ -10106,7 +10430,7 @@ func (m *awsRestjson1_deserializeOpGetJourneyExecutionActivityMetrics) HandleDes return out, metadata, err } - err = awsRestjson1_deserializeDocumentJourneyExecutionActivityMetricsResponse(&output.JourneyExecutionActivityMetricsResponse, shape) + err = awsRestjson1_deserializeDocumentJourneyResponse(&output.JourneyResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -10119,7 +10443,7 @@ func (m *awsRestjson1_deserializeOpGetJourneyExecutionActivityMetrics) HandleDes return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetJourneyExecutionActivityMetrics(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetJourney(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -10191,7 +10515,7 @@ func awsRestjson1_deserializeOpErrorGetJourneyExecutionActivityMetrics(response } } -func awsRestjson1_deserializeOpDocumentGetJourneyExecutionActivityMetricsOutput(v **GetJourneyExecutionActivityMetricsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetJourneyOutput(v **GetJourneyOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -10204,17 +10528,17 @@ func awsRestjson1_deserializeOpDocumentGetJourneyExecutionActivityMetricsOutput( return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetJourneyExecutionActivityMetricsOutput + var sv *GetJourneyOutput if *v == nil { - sv = &GetJourneyExecutionActivityMetricsOutput{} + sv = &GetJourneyOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "JourneyExecutionActivityMetricsResponse": - if err := awsRestjson1_deserializeDocumentJourneyExecutionActivityMetricsResponse(&sv.JourneyExecutionActivityMetricsResponse, value); err != nil { + case "JourneyResponse": + if err := awsRestjson1_deserializeDocumentJourneyResponse(&sv.JourneyResponse, value); err != nil { return err } @@ -10227,14 +10551,14 @@ func awsRestjson1_deserializeOpDocumentGetJourneyExecutionActivityMetricsOutput( return nil } -type awsRestjson1_deserializeOpGetJourneyExecutionMetrics struct { +type awsRestjson1_deserializeOpGetJourneyDateRangeKpi struct { } -func (*awsRestjson1_deserializeOpGetJourneyExecutionMetrics) ID() string { +func (*awsRestjson1_deserializeOpGetJourneyDateRangeKpi) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetJourneyExecutionMetrics) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetJourneyDateRangeKpi) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10248,9 +10572,9 @@ func (m *awsRestjson1_deserializeOpGetJourneyExecutionMetrics) HandleDeserialize } if response.StatusCode < 200 || 
response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetJourneyExecutionMetrics(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetJourneyDateRangeKpi(response, &metadata) } - output := &GetJourneyExecutionMetricsOutput{} + output := &GetJourneyDateRangeKpiOutput{} out.Result = output var buff [1024]byte @@ -10271,7 +10595,7 @@ func (m *awsRestjson1_deserializeOpGetJourneyExecutionMetrics) HandleDeserialize return out, metadata, err } - err = awsRestjson1_deserializeDocumentJourneyExecutionMetricsResponse(&output.JourneyExecutionMetricsResponse, shape) + err = awsRestjson1_deserializeDocumentJourneyDateRangeKpiResponse(&output.JourneyDateRangeKpiResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -10284,7 +10608,7 @@ func (m *awsRestjson1_deserializeOpGetJourneyExecutionMetrics) HandleDeserialize return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetJourneyExecutionMetrics(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetJourneyDateRangeKpi(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -10356,7 +10680,7 @@ func awsRestjson1_deserializeOpErrorGetJourneyExecutionMetrics(response *smithyh } } -func awsRestjson1_deserializeOpDocumentGetJourneyExecutionMetricsOutput(v **GetJourneyExecutionMetricsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetJourneyDateRangeKpiOutput(v **GetJourneyDateRangeKpiOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -10369,17 +10693,17 @@ func awsRestjson1_deserializeOpDocumentGetJourneyExecutionMetricsOutput(v **GetJ return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetJourneyExecutionMetricsOutput + var sv *GetJourneyDateRangeKpiOutput if *v == nil { - sv = &GetJourneyExecutionMetricsOutput{} + sv = &GetJourneyDateRangeKpiOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "JourneyExecutionMetricsResponse": - if err := awsRestjson1_deserializeDocumentJourneyExecutionMetricsResponse(&sv.JourneyExecutionMetricsResponse, value); err != nil { + case "JourneyDateRangeKpiResponse": + if err := awsRestjson1_deserializeDocumentJourneyDateRangeKpiResponse(&sv.JourneyDateRangeKpiResponse, value); err != nil { return err } @@ -10392,14 +10716,14 @@ func awsRestjson1_deserializeOpDocumentGetJourneyExecutionMetricsOutput(v **GetJ return nil } -type awsRestjson1_deserializeOpGetPushTemplate struct { +type awsRestjson1_deserializeOpGetJourneyExecutionActivityMetrics struct { } -func (*awsRestjson1_deserializeOpGetPushTemplate) ID() string { +func (*awsRestjson1_deserializeOpGetJourneyExecutionActivityMetrics) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetPushTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetJourneyExecutionActivityMetrics) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10413,9 +10737,9 @@ func (m 
*awsRestjson1_deserializeOpGetPushTemplate) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetPushTemplate(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetJourneyExecutionActivityMetrics(response, &metadata) } - output := &GetPushTemplateOutput{} + output := &GetJourneyExecutionActivityMetricsOutput{} out.Result = output var buff [1024]byte @@ -10436,7 +10760,7 @@ func (m *awsRestjson1_deserializeOpGetPushTemplate) HandleDeserialize(ctx contex return out, metadata, err } - err = awsRestjson1_deserializeDocumentPushNotificationTemplateResponse(&output.PushNotificationTemplateResponse, shape) + err = awsRestjson1_deserializeDocumentJourneyExecutionActivityMetricsResponse(&output.JourneyExecutionActivityMetricsResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -10449,7 +10773,7 @@ func (m *awsRestjson1_deserializeOpGetPushTemplate) HandleDeserialize(ctx contex return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetPushTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetJourneyExecutionActivityMetrics(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -10521,7 +10845,7 @@ func awsRestjson1_deserializeOpErrorGetPushTemplate(response *smithyhttp.Respons } } -func awsRestjson1_deserializeOpDocumentGetPushTemplateOutput(v **GetPushTemplateOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetJourneyExecutionActivityMetricsOutput(v **GetJourneyExecutionActivityMetricsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -10534,17 +10858,17 @@ func awsRestjson1_deserializeOpDocumentGetPushTemplateOutput(v **GetPushTemplate return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetPushTemplateOutput + var sv *GetJourneyExecutionActivityMetricsOutput if *v == nil { - sv = &GetPushTemplateOutput{} + sv = &GetJourneyExecutionActivityMetricsOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "PushNotificationTemplateResponse": - if err := awsRestjson1_deserializeDocumentPushNotificationTemplateResponse(&sv.PushNotificationTemplateResponse, value); err != nil { + case "JourneyExecutionActivityMetricsResponse": + if err := awsRestjson1_deserializeDocumentJourneyExecutionActivityMetricsResponse(&sv.JourneyExecutionActivityMetricsResponse, value); err != nil { return err } @@ -10557,14 +10881,14 @@ func awsRestjson1_deserializeOpDocumentGetPushTemplateOutput(v **GetPushTemplate return nil } -type awsRestjson1_deserializeOpGetRecommenderConfiguration struct { +type awsRestjson1_deserializeOpGetJourneyExecutionMetrics struct { } -func (*awsRestjson1_deserializeOpGetRecommenderConfiguration) ID() string { +func (*awsRestjson1_deserializeOpGetJourneyExecutionMetrics) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetRecommenderConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetJourneyExecutionMetrics) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10578,9 +10902,9 @@ func (m *awsRestjson1_deserializeOpGetRecommenderConfiguration) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetRecommenderConfiguration(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetJourneyExecutionMetrics(response, &metadata) } - output := &GetRecommenderConfigurationOutput{} + output := &GetJourneyExecutionMetricsOutput{} out.Result = output var buff [1024]byte @@ -10601,7 +10925,7 @@ func (m *awsRestjson1_deserializeOpGetRecommenderConfiguration) HandleDeserializ return out, metadata, err } - err = awsRestjson1_deserializeDocumentRecommenderConfigurationResponse(&output.RecommenderConfigurationResponse, shape) + err = awsRestjson1_deserializeDocumentJourneyExecutionMetricsResponse(&output.JourneyExecutionMetricsResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -10614,7 +10938,7 @@ func (m *awsRestjson1_deserializeOpGetRecommenderConfiguration) HandleDeserializ return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetRecommenderConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetJourneyExecutionMetrics(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -10686,7 +11010,7 @@ func awsRestjson1_deserializeOpErrorGetRecommenderConfiguration(response *smithy } } -func awsRestjson1_deserializeOpDocumentGetRecommenderConfigurationOutput(v **GetRecommenderConfigurationOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetJourneyExecutionMetricsOutput(v **GetJourneyExecutionMetricsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -10699,17 +11023,17 @@ func awsRestjson1_deserializeOpDocumentGetRecommenderConfigurationOutput(v **Get return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetRecommenderConfigurationOutput + var sv *GetJourneyExecutionMetricsOutput if *v == nil { - sv = &GetRecommenderConfigurationOutput{} + sv = &GetJourneyExecutionMetricsOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "RecommenderConfigurationResponse": - if err := awsRestjson1_deserializeDocumentRecommenderConfigurationResponse(&sv.RecommenderConfigurationResponse, value); err != nil { + case "JourneyExecutionMetricsResponse": + if err := awsRestjson1_deserializeDocumentJourneyExecutionMetricsResponse(&sv.JourneyExecutionMetricsResponse, value); err != nil { return err } @@ -10722,14 +11046,14 @@ func awsRestjson1_deserializeOpDocumentGetRecommenderConfigurationOutput(v **Get return nil } -type awsRestjson1_deserializeOpGetRecommenderConfigurations struct { +type awsRestjson1_deserializeOpGetPushTemplate struct { } -func (*awsRestjson1_deserializeOpGetRecommenderConfigurations) ID() string { +func (*awsRestjson1_deserializeOpGetPushTemplate) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetRecommenderConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) 
( +func (m *awsRestjson1_deserializeOpGetPushTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10743,9 +11067,9 @@ func (m *awsRestjson1_deserializeOpGetRecommenderConfigurations) HandleDeseriali } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetRecommenderConfigurations(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetPushTemplate(response, &metadata) } - output := &GetRecommenderConfigurationsOutput{} + output := &GetPushTemplateOutput{} out.Result = output var buff [1024]byte @@ -10766,7 +11090,7 @@ func (m *awsRestjson1_deserializeOpGetRecommenderConfigurations) HandleDeseriali return out, metadata, err } - err = awsRestjson1_deserializeDocumentListRecommenderConfigurationsResponse(&output.ListRecommenderConfigurationsResponse, shape) + err = awsRestjson1_deserializeDocumentPushNotificationTemplateResponse(&output.PushNotificationTemplateResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -10779,7 +11103,7 @@ func (m *awsRestjson1_deserializeOpGetRecommenderConfigurations) HandleDeseriali return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetRecommenderConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetPushTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -10851,7 +11175,7 @@ func awsRestjson1_deserializeOpErrorGetRecommenderConfigurations(response *smith } } -func awsRestjson1_deserializeOpDocumentGetRecommenderConfigurationsOutput(v **GetRecommenderConfigurationsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetPushTemplateOutput(v **GetPushTemplateOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -10864,17 +11188,17 @@ func awsRestjson1_deserializeOpDocumentGetRecommenderConfigurationsOutput(v **Ge return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetRecommenderConfigurationsOutput + var sv *GetPushTemplateOutput if *v == nil { - sv = &GetRecommenderConfigurationsOutput{} + sv = &GetPushTemplateOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "ListRecommenderConfigurationsResponse": - if err := awsRestjson1_deserializeDocumentListRecommenderConfigurationsResponse(&sv.ListRecommenderConfigurationsResponse, value); err != nil { + case "PushNotificationTemplateResponse": + if err := awsRestjson1_deserializeDocumentPushNotificationTemplateResponse(&sv.PushNotificationTemplateResponse, value); err != nil { return err } @@ -10887,14 +11211,14 @@ func awsRestjson1_deserializeOpDocumentGetRecommenderConfigurationsOutput(v **Ge return nil } -type awsRestjson1_deserializeOpGetSegment struct { +type awsRestjson1_deserializeOpGetRecommenderConfiguration struct { } -func (*awsRestjson1_deserializeOpGetSegment) ID() string { +func (*awsRestjson1_deserializeOpGetRecommenderConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetSegment) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetRecommenderConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -10908,9 +11232,9 @@ func (m *awsRestjson1_deserializeOpGetSegment) HandleDeserialize(ctx context.Con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetSegment(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetRecommenderConfiguration(response, &metadata) } - output := &GetSegmentOutput{} + output := &GetRecommenderConfigurationOutput{} out.Result = output var buff [1024]byte @@ -10931,7 +11255,7 @@ func (m *awsRestjson1_deserializeOpGetSegment) HandleDeserialize(ctx context.Con return out, metadata, err } - err = awsRestjson1_deserializeDocumentSegmentResponse(&output.SegmentResponse, shape) + err = awsRestjson1_deserializeDocumentRecommenderConfigurationResponse(&output.RecommenderConfigurationResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -10944,7 +11268,7 @@ func (m *awsRestjson1_deserializeOpGetSegment) HandleDeserialize(ctx context.Con return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetSegment(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetRecommenderConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11016,7 +11340,7 @@ func awsRestjson1_deserializeOpErrorGetSegment(response *smithyhttp.Response, me } } -func awsRestjson1_deserializeOpDocumentGetSegmentOutput(v **GetSegmentOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetRecommenderConfigurationOutput(v **GetRecommenderConfigurationOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11029,17 +11353,17 @@ func awsRestjson1_deserializeOpDocumentGetSegmentOutput(v **GetSegmentOutput, va return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetSegmentOutput + var sv *GetRecommenderConfigurationOutput if *v == nil { - sv = &GetSegmentOutput{} + sv = &GetRecommenderConfigurationOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "SegmentResponse": - if err := awsRestjson1_deserializeDocumentSegmentResponse(&sv.SegmentResponse, value); err != nil { + case "RecommenderConfigurationResponse": + if err := awsRestjson1_deserializeDocumentRecommenderConfigurationResponse(&sv.RecommenderConfigurationResponse, value); err != nil { return err } @@ -11052,14 +11376,14 @@ func awsRestjson1_deserializeOpDocumentGetSegmentOutput(v **GetSegmentOutput, va return nil } -type awsRestjson1_deserializeOpGetSegmentExportJobs struct { +type awsRestjson1_deserializeOpGetRecommenderConfigurations struct { } -func (*awsRestjson1_deserializeOpGetSegmentExportJobs) ID() string { +func (*awsRestjson1_deserializeOpGetRecommenderConfigurations) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetSegmentExportJobs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetRecommenderConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11073,9 +11397,9 @@ func (m *awsRestjson1_deserializeOpGetSegmentExportJobs) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetSegmentExportJobs(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetRecommenderConfigurations(response, &metadata) } - output := &GetSegmentExportJobsOutput{} + output := &GetRecommenderConfigurationsOutput{} out.Result = output var buff [1024]byte @@ -11096,7 +11420,7 @@ func (m *awsRestjson1_deserializeOpGetSegmentExportJobs) HandleDeserialize(ctx c return out, metadata, err } - err = awsRestjson1_deserializeDocumentExportJobsResponse(&output.ExportJobsResponse, shape) + err = awsRestjson1_deserializeDocumentListRecommenderConfigurationsResponse(&output.ListRecommenderConfigurationsResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -11109,7 +11433,7 @@ func (m *awsRestjson1_deserializeOpGetSegmentExportJobs) HandleDeserialize(ctx c return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetSegmentExportJobs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetRecommenderConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11181,7 +11505,7 @@ func awsRestjson1_deserializeOpErrorGetSegmentExportJobs(response *smithyhttp.Re } } -func awsRestjson1_deserializeOpDocumentGetSegmentExportJobsOutput(v **GetSegmentExportJobsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetRecommenderConfigurationsOutput(v **GetRecommenderConfigurationsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11194,17 +11518,17 @@ func awsRestjson1_deserializeOpDocumentGetSegmentExportJobsOutput(v **GetSegment return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetSegmentExportJobsOutput + var sv *GetRecommenderConfigurationsOutput if *v == nil { - sv = &GetSegmentExportJobsOutput{} + sv = &GetRecommenderConfigurationsOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "ExportJobsResponse": - if err := awsRestjson1_deserializeDocumentExportJobsResponse(&sv.ExportJobsResponse, value); err != nil { + case "ListRecommenderConfigurationsResponse": + if err := awsRestjson1_deserializeDocumentListRecommenderConfigurationsResponse(&sv.ListRecommenderConfigurationsResponse, value); err != nil { return err } @@ -11217,14 +11541,14 @@ func awsRestjson1_deserializeOpDocumentGetSegmentExportJobsOutput(v **GetSegment return nil } -type awsRestjson1_deserializeOpGetSegmentImportJobs struct { +type awsRestjson1_deserializeOpGetSegment struct { } -func (*awsRestjson1_deserializeOpGetSegmentImportJobs) ID() string { +func (*awsRestjson1_deserializeOpGetSegment) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetSegmentImportJobs) HandleDeserialize(ctx context.Context, 
in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetSegment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11238,9 +11562,9 @@ func (m *awsRestjson1_deserializeOpGetSegmentImportJobs) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetSegmentImportJobs(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetSegment(response, &metadata) } - output := &GetSegmentImportJobsOutput{} + output := &GetSegmentOutput{} out.Result = output var buff [1024]byte @@ -11261,7 +11585,7 @@ func (m *awsRestjson1_deserializeOpGetSegmentImportJobs) HandleDeserialize(ctx c return out, metadata, err } - err = awsRestjson1_deserializeDocumentImportJobsResponse(&output.ImportJobsResponse, shape) + err = awsRestjson1_deserializeDocumentSegmentResponse(&output.SegmentResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -11274,7 +11598,7 @@ func (m *awsRestjson1_deserializeOpGetSegmentImportJobs) HandleDeserialize(ctx c return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetSegmentImportJobs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetSegment(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11346,7 +11670,7 @@ func awsRestjson1_deserializeOpErrorGetSegmentImportJobs(response *smithyhttp.Re } } -func awsRestjson1_deserializeOpDocumentGetSegmentImportJobsOutput(v **GetSegmentImportJobsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetSegmentOutput(v **GetSegmentOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11359,17 +11683,17 @@ func awsRestjson1_deserializeOpDocumentGetSegmentImportJobsOutput(v **GetSegment return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetSegmentImportJobsOutput + var sv *GetSegmentOutput if *v == nil { - sv = &GetSegmentImportJobsOutput{} + sv = &GetSegmentOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "ImportJobsResponse": - if err := awsRestjson1_deserializeDocumentImportJobsResponse(&sv.ImportJobsResponse, value); err != nil { + case "SegmentResponse": + if err := awsRestjson1_deserializeDocumentSegmentResponse(&sv.SegmentResponse, value); err != nil { return err } @@ -11382,14 +11706,14 @@ func awsRestjson1_deserializeOpDocumentGetSegmentImportJobsOutput(v **GetSegment return nil } -type awsRestjson1_deserializeOpGetSegments struct { +type awsRestjson1_deserializeOpGetSegmentExportJobs struct { } -func (*awsRestjson1_deserializeOpGetSegments) ID() string { +func (*awsRestjson1_deserializeOpGetSegmentExportJobs) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetSegments) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetSegmentExportJobs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11403,9 +11727,9 @@ func (m *awsRestjson1_deserializeOpGetSegments) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetSegments(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetSegmentExportJobs(response, &metadata) } - output := &GetSegmentsOutput{} + output := &GetSegmentExportJobsOutput{} out.Result = output var buff [1024]byte @@ -11426,7 +11750,7 @@ func (m *awsRestjson1_deserializeOpGetSegments) HandleDeserialize(ctx context.Co return out, metadata, err } - err = awsRestjson1_deserializeDocumentSegmentsResponse(&output.SegmentsResponse, shape) + err = awsRestjson1_deserializeDocumentExportJobsResponse(&output.ExportJobsResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -11439,7 +11763,7 @@ func (m *awsRestjson1_deserializeOpGetSegments) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetSegments(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetSegmentExportJobs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11511,7 +11835,7 @@ func awsRestjson1_deserializeOpErrorGetSegments(response *smithyhttp.Response, m } } -func awsRestjson1_deserializeOpDocumentGetSegmentsOutput(v **GetSegmentsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetSegmentExportJobsOutput(v **GetSegmentExportJobsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11524,17 +11848,17 @@ func awsRestjson1_deserializeOpDocumentGetSegmentsOutput(v **GetSegmentsOutput, return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetSegmentsOutput + var sv *GetSegmentExportJobsOutput if *v == nil { - sv = &GetSegmentsOutput{} + sv = &GetSegmentExportJobsOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "SegmentsResponse": - if err := awsRestjson1_deserializeDocumentSegmentsResponse(&sv.SegmentsResponse, value); err != nil { + case "ExportJobsResponse": + if err := awsRestjson1_deserializeDocumentExportJobsResponse(&sv.ExportJobsResponse, value); err != nil { return err } @@ -11547,14 +11871,14 @@ func awsRestjson1_deserializeOpDocumentGetSegmentsOutput(v **GetSegmentsOutput, return nil } -type awsRestjson1_deserializeOpGetSegmentVersion struct { +type awsRestjson1_deserializeOpGetSegmentImportJobs struct { } -func (*awsRestjson1_deserializeOpGetSegmentVersion) ID() string { +func (*awsRestjson1_deserializeOpGetSegmentImportJobs) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetSegmentVersion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetSegmentImportJobs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11568,9 +11892,9 
@@ func (m *awsRestjson1_deserializeOpGetSegmentVersion) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetSegmentVersion(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetSegmentImportJobs(response, &metadata) } - output := &GetSegmentVersionOutput{} + output := &GetSegmentImportJobsOutput{} out.Result = output var buff [1024]byte @@ -11591,7 +11915,7 @@ func (m *awsRestjson1_deserializeOpGetSegmentVersion) HandleDeserialize(ctx cont return out, metadata, err } - err = awsRestjson1_deserializeDocumentSegmentResponse(&output.SegmentResponse, shape) + err = awsRestjson1_deserializeDocumentImportJobsResponse(&output.ImportJobsResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -11604,7 +11928,7 @@ func (m *awsRestjson1_deserializeOpGetSegmentVersion) HandleDeserialize(ctx cont return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetSegmentVersion(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetSegmentImportJobs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11676,7 +12000,7 @@ func awsRestjson1_deserializeOpErrorGetSegmentVersion(response *smithyhttp.Respo } } -func awsRestjson1_deserializeOpDocumentGetSegmentVersionOutput(v **GetSegmentVersionOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetSegmentImportJobsOutput(v **GetSegmentImportJobsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11689,17 +12013,17 @@ func awsRestjson1_deserializeOpDocumentGetSegmentVersionOutput(v **GetSegmentVer return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetSegmentVersionOutput + var sv *GetSegmentImportJobsOutput if *v == nil { - sv = &GetSegmentVersionOutput{} + sv = &GetSegmentImportJobsOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "SegmentResponse": - if err := awsRestjson1_deserializeDocumentSegmentResponse(&sv.SegmentResponse, value); err != nil { + case "ImportJobsResponse": + if err := awsRestjson1_deserializeDocumentImportJobsResponse(&sv.ImportJobsResponse, value); err != nil { return err } @@ -11712,14 +12036,14 @@ func awsRestjson1_deserializeOpDocumentGetSegmentVersionOutput(v **GetSegmentVer return nil } -type awsRestjson1_deserializeOpGetSegmentVersions struct { +type awsRestjson1_deserializeOpGetSegments struct { } -func (*awsRestjson1_deserializeOpGetSegmentVersions) ID() string { +func (*awsRestjson1_deserializeOpGetSegments) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetSegmentVersions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetSegments) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11733,9 +12057,9 @@ func (m *awsRestjson1_deserializeOpGetSegmentVersions) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsRestjson1_deserializeOpErrorGetSegmentVersions(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetSegments(response, &metadata) } - output := &GetSegmentVersionsOutput{} + output := &GetSegmentsOutput{} out.Result = output var buff [1024]byte @@ -11769,7 +12093,7 @@ func (m *awsRestjson1_deserializeOpGetSegmentVersions) HandleDeserialize(ctx con return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetSegmentVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetSegments(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -11841,7 +12165,7 @@ func awsRestjson1_deserializeOpErrorGetSegmentVersions(response *smithyhttp.Resp } } -func awsRestjson1_deserializeOpDocumentGetSegmentVersionsOutput(v **GetSegmentVersionsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetSegmentsOutput(v **GetSegmentsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -11854,9 +12178,9 @@ func awsRestjson1_deserializeOpDocumentGetSegmentVersionsOutput(v **GetSegmentVe return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetSegmentVersionsOutput + var sv *GetSegmentsOutput if *v == nil { - sv = &GetSegmentVersionsOutput{} + sv = &GetSegmentsOutput{} } else { sv = *v } @@ -11877,14 +12201,14 @@ func awsRestjson1_deserializeOpDocumentGetSegmentVersionsOutput(v **GetSegmentVe return nil } -type awsRestjson1_deserializeOpGetSmsChannel struct { +type awsRestjson1_deserializeOpGetSegmentVersion struct { } -func (*awsRestjson1_deserializeOpGetSmsChannel) ID() string { +func (*awsRestjson1_deserializeOpGetSegmentVersion) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetSmsChannel) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetSegmentVersion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -11898,9 +12222,9 @@ func (m *awsRestjson1_deserializeOpGetSmsChannel) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetSmsChannel(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetSegmentVersion(response, &metadata) } - output := &GetSmsChannelOutput{} + output := &GetSegmentVersionOutput{} out.Result = output var buff [1024]byte @@ -11921,7 +12245,7 @@ func (m *awsRestjson1_deserializeOpGetSmsChannel) HandleDeserialize(ctx context. return out, metadata, err } - err = awsRestjson1_deserializeDocumentSMSChannelResponse(&output.SMSChannelResponse, shape) + err = awsRestjson1_deserializeDocumentSegmentResponse(&output.SegmentResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -11934,7 +12258,7 @@ func (m *awsRestjson1_deserializeOpGetSmsChannel) HandleDeserialize(ctx context. 
return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetSmsChannel(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetSegmentVersion(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12006,7 +12330,7 @@ func awsRestjson1_deserializeOpErrorGetSmsChannel(response *smithyhttp.Response, } } -func awsRestjson1_deserializeOpDocumentGetSmsChannelOutput(v **GetSmsChannelOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetSegmentVersionOutput(v **GetSegmentVersionOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12019,17 +12343,17 @@ func awsRestjson1_deserializeOpDocumentGetSmsChannelOutput(v **GetSmsChannelOutp return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetSmsChannelOutput + var sv *GetSegmentVersionOutput if *v == nil { - sv = &GetSmsChannelOutput{} + sv = &GetSegmentVersionOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "SMSChannelResponse": - if err := awsRestjson1_deserializeDocumentSMSChannelResponse(&sv.SMSChannelResponse, value); err != nil { + case "SegmentResponse": + if err := awsRestjson1_deserializeDocumentSegmentResponse(&sv.SegmentResponse, value); err != nil { return err } @@ -12042,14 +12366,14 @@ func awsRestjson1_deserializeOpDocumentGetSmsChannelOutput(v **GetSmsChannelOutp return nil } -type awsRestjson1_deserializeOpGetSmsTemplate struct { +type awsRestjson1_deserializeOpGetSegmentVersions struct { } -func (*awsRestjson1_deserializeOpGetSmsTemplate) ID() string { +func (*awsRestjson1_deserializeOpGetSegmentVersions) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetSmsTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetSegmentVersions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -12063,9 +12387,9 @@ func (m *awsRestjson1_deserializeOpGetSmsTemplate) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetSmsTemplate(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetSegmentVersions(response, &metadata) } - output := &GetSmsTemplateOutput{} + output := &GetSegmentVersionsOutput{} out.Result = output var buff [1024]byte @@ -12086,7 +12410,7 @@ func (m *awsRestjson1_deserializeOpGetSmsTemplate) HandleDeserialize(ctx context return out, metadata, err } - err = awsRestjson1_deserializeDocumentSMSTemplateResponse(&output.SMSTemplateResponse, shape) + err = awsRestjson1_deserializeDocumentSegmentsResponse(&output.SegmentsResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12099,7 +12423,7 @@ func (m *awsRestjson1_deserializeOpGetSmsTemplate) HandleDeserialize(ctx context return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetSmsTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsRestjson1_deserializeOpErrorGetSegmentVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12171,7 +12495,7 @@ func awsRestjson1_deserializeOpErrorGetSmsTemplate(response *smithyhttp.Response } } -func awsRestjson1_deserializeOpDocumentGetSmsTemplateOutput(v **GetSmsTemplateOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetSegmentVersionsOutput(v **GetSegmentVersionsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12184,17 +12508,17 @@ func awsRestjson1_deserializeOpDocumentGetSmsTemplateOutput(v **GetSmsTemplateOu return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetSmsTemplateOutput + var sv *GetSegmentVersionsOutput if *v == nil { - sv = &GetSmsTemplateOutput{} + sv = &GetSegmentVersionsOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "SMSTemplateResponse": - if err := awsRestjson1_deserializeDocumentSMSTemplateResponse(&sv.SMSTemplateResponse, value); err != nil { + case "SegmentsResponse": + if err := awsRestjson1_deserializeDocumentSegmentsResponse(&sv.SegmentsResponse, value); err != nil { return err } @@ -12207,14 +12531,14 @@ func awsRestjson1_deserializeOpDocumentGetSmsTemplateOutput(v **GetSmsTemplateOu return nil } -type awsRestjson1_deserializeOpGetUserEndpoints struct { +type awsRestjson1_deserializeOpGetSmsChannel struct { } -func (*awsRestjson1_deserializeOpGetUserEndpoints) ID() string { +func (*awsRestjson1_deserializeOpGetSmsChannel) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetUserEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetSmsChannel) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -12228,9 +12552,9 @@ func (m *awsRestjson1_deserializeOpGetUserEndpoints) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetUserEndpoints(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetSmsChannel(response, &metadata) } - output := &GetUserEndpointsOutput{} + output := &GetSmsChannelOutput{} out.Result = output var buff [1024]byte @@ -12251,7 +12575,7 @@ func (m *awsRestjson1_deserializeOpGetUserEndpoints) HandleDeserialize(ctx conte return out, metadata, err } - err = awsRestjson1_deserializeDocumentEndpointsResponse(&output.EndpointsResponse, shape) + err = awsRestjson1_deserializeDocumentSMSChannelResponse(&output.SMSChannelResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12264,7 +12588,7 @@ func (m *awsRestjson1_deserializeOpGetUserEndpoints) HandleDeserialize(ctx conte return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetUserEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetSmsChannel(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != 
nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12336,7 +12660,7 @@ func awsRestjson1_deserializeOpErrorGetUserEndpoints(response *smithyhttp.Respon } } -func awsRestjson1_deserializeOpDocumentGetUserEndpointsOutput(v **GetUserEndpointsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetSmsChannelOutput(v **GetSmsChannelOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12349,17 +12673,17 @@ func awsRestjson1_deserializeOpDocumentGetUserEndpointsOutput(v **GetUserEndpoin return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetUserEndpointsOutput + var sv *GetSmsChannelOutput if *v == nil { - sv = &GetUserEndpointsOutput{} + sv = &GetSmsChannelOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "EndpointsResponse": - if err := awsRestjson1_deserializeDocumentEndpointsResponse(&sv.EndpointsResponse, value); err != nil { + case "SMSChannelResponse": + if err := awsRestjson1_deserializeDocumentSMSChannelResponse(&sv.SMSChannelResponse, value); err != nil { return err } @@ -12372,14 +12696,14 @@ func awsRestjson1_deserializeOpDocumentGetUserEndpointsOutput(v **GetUserEndpoin return nil } -type awsRestjson1_deserializeOpGetVoiceChannel struct { +type awsRestjson1_deserializeOpGetSmsTemplate struct { } -func (*awsRestjson1_deserializeOpGetVoiceChannel) ID() string { +func (*awsRestjson1_deserializeOpGetSmsTemplate) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetVoiceChannel) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetSmsTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -12393,9 +12717,9 @@ func (m *awsRestjson1_deserializeOpGetVoiceChannel) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetVoiceChannel(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetSmsTemplate(response, &metadata) } - output := &GetVoiceChannelOutput{} + output := &GetSmsTemplateOutput{} out.Result = output var buff [1024]byte @@ -12416,7 +12740,7 @@ func (m *awsRestjson1_deserializeOpGetVoiceChannel) HandleDeserialize(ctx contex return out, metadata, err } - err = awsRestjson1_deserializeDocumentVoiceChannelResponse(&output.VoiceChannelResponse, shape) + err = awsRestjson1_deserializeDocumentSMSTemplateResponse(&output.SMSTemplateResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12429,7 +12753,7 @@ func (m *awsRestjson1_deserializeOpGetVoiceChannel) HandleDeserialize(ctx contex return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetVoiceChannel(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetSmsTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12501,7 +12825,7 @@ func awsRestjson1_deserializeOpErrorGetVoiceChannel(response *smithyhttp.Respons 
} } -func awsRestjson1_deserializeOpDocumentGetVoiceChannelOutput(v **GetVoiceChannelOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetSmsTemplateOutput(v **GetSmsTemplateOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12514,17 +12838,17 @@ func awsRestjson1_deserializeOpDocumentGetVoiceChannelOutput(v **GetVoiceChannel return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetVoiceChannelOutput + var sv *GetSmsTemplateOutput if *v == nil { - sv = &GetVoiceChannelOutput{} + sv = &GetSmsTemplateOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "VoiceChannelResponse": - if err := awsRestjson1_deserializeDocumentVoiceChannelResponse(&sv.VoiceChannelResponse, value); err != nil { + case "SMSTemplateResponse": + if err := awsRestjson1_deserializeDocumentSMSTemplateResponse(&sv.SMSTemplateResponse, value); err != nil { return err } @@ -12537,14 +12861,14 @@ func awsRestjson1_deserializeOpDocumentGetVoiceChannelOutput(v **GetVoiceChannel return nil } -type awsRestjson1_deserializeOpGetVoiceTemplate struct { +type awsRestjson1_deserializeOpGetUserEndpoints struct { } -func (*awsRestjson1_deserializeOpGetVoiceTemplate) ID() string { +func (*awsRestjson1_deserializeOpGetUserEndpoints) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGetVoiceTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetUserEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -12558,9 +12882,9 @@ func (m *awsRestjson1_deserializeOpGetVoiceTemplate) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetVoiceTemplate(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetUserEndpoints(response, &metadata) } - output := &GetVoiceTemplateOutput{} + output := &GetUserEndpointsOutput{} out.Result = output var buff [1024]byte @@ -12581,7 +12905,7 @@ func (m *awsRestjson1_deserializeOpGetVoiceTemplate) HandleDeserialize(ctx conte return out, metadata, err } - err = awsRestjson1_deserializeDocumentVoiceTemplateResponse(&output.VoiceTemplateResponse, shape) + err = awsRestjson1_deserializeDocumentEndpointsResponse(&output.EndpointsResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12594,7 +12918,7 @@ func (m *awsRestjson1_deserializeOpGetVoiceTemplate) HandleDeserialize(ctx conte return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetVoiceTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetUserEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12666,7 +12990,7 @@ func awsRestjson1_deserializeOpErrorGetVoiceTemplate(response *smithyhttp.Respon } } -func awsRestjson1_deserializeOpDocumentGetVoiceTemplateOutput(v **GetVoiceTemplateOutput, value interface{}) error { +func 
awsRestjson1_deserializeOpDocumentGetUserEndpointsOutput(v **GetUserEndpointsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12679,17 +13003,17 @@ func awsRestjson1_deserializeOpDocumentGetVoiceTemplateOutput(v **GetVoiceTempla return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetVoiceTemplateOutput + var sv *GetUserEndpointsOutput if *v == nil { - sv = &GetVoiceTemplateOutput{} + sv = &GetUserEndpointsOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "VoiceTemplateResponse": - if err := awsRestjson1_deserializeDocumentVoiceTemplateResponse(&sv.VoiceTemplateResponse, value); err != nil { + case "EndpointsResponse": + if err := awsRestjson1_deserializeDocumentEndpointsResponse(&sv.EndpointsResponse, value); err != nil { return err } @@ -12702,14 +13026,14 @@ func awsRestjson1_deserializeOpDocumentGetVoiceTemplateOutput(v **GetVoiceTempla return nil } -type awsRestjson1_deserializeOpListJourneys struct { +type awsRestjson1_deserializeOpGetVoiceChannel struct { } -func (*awsRestjson1_deserializeOpListJourneys) ID() string { +func (*awsRestjson1_deserializeOpGetVoiceChannel) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListJourneys) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetVoiceChannel) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -12723,9 +13047,9 @@ func (m *awsRestjson1_deserializeOpListJourneys) HandleDeserialize(ctx context.C } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListJourneys(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetVoiceChannel(response, &metadata) } - output := &ListJourneysOutput{} + output := &GetVoiceChannelOutput{} out.Result = output var buff [1024]byte @@ -12746,7 +13070,7 @@ func (m *awsRestjson1_deserializeOpListJourneys) HandleDeserialize(ctx context.C return out, metadata, err } - err = awsRestjson1_deserializeDocumentJourneysResponse(&output.JourneysResponse, shape) + err = awsRestjson1_deserializeDocumentVoiceChannelResponse(&output.VoiceChannelResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12759,7 +13083,7 @@ func (m *awsRestjson1_deserializeOpListJourneys) HandleDeserialize(ctx context.C return out, metadata, err } -func awsRestjson1_deserializeOpErrorListJourneys(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetVoiceChannel(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12831,7 +13155,7 @@ func awsRestjson1_deserializeOpErrorListJourneys(response *smithyhttp.Response, } } -func awsRestjson1_deserializeOpDocumentListJourneysOutput(v **ListJourneysOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetVoiceChannelOutput(v **GetVoiceChannelOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12844,17 +13168,17 @@ func 
awsRestjson1_deserializeOpDocumentListJourneysOutput(v **ListJourneysOutput return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListJourneysOutput + var sv *GetVoiceChannelOutput if *v == nil { - sv = &ListJourneysOutput{} + sv = &GetVoiceChannelOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "JourneysResponse": - if err := awsRestjson1_deserializeDocumentJourneysResponse(&sv.JourneysResponse, value); err != nil { + case "VoiceChannelResponse": + if err := awsRestjson1_deserializeDocumentVoiceChannelResponse(&sv.VoiceChannelResponse, value); err != nil { return err } @@ -12867,14 +13191,14 @@ func awsRestjson1_deserializeOpDocumentListJourneysOutput(v **ListJourneysOutput return nil } -type awsRestjson1_deserializeOpListTagsForResource struct { +type awsRestjson1_deserializeOpGetVoiceTemplate struct { } -func (*awsRestjson1_deserializeOpListTagsForResource) ID() string { +func (*awsRestjson1_deserializeOpGetVoiceTemplate) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetVoiceTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -12888,9 +13212,9 @@ func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListTagsForResource(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetVoiceTemplate(response, &metadata) } - output := &ListTagsForResourceOutput{} + output := &GetVoiceTemplateOutput{} out.Result = output var buff [1024]byte @@ -12911,7 +13235,7 @@ func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx co return out, metadata, err } - err = awsRestjson1_deserializeDocumentTagsModel(&output.TagsModel, shape) + err = awsRestjson1_deserializeDocumentVoiceTemplateResponse(&output.VoiceTemplateResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -12924,7 +13248,7 @@ func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx co return out, metadata, err } -func awsRestjson1_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetVoiceTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -12965,6 +13289,27 @@ func awsRestjson1_deserializeOpErrorListTagsForResource(response *smithyhttp.Res } switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("MethodNotAllowedException", errorCode): + 
return awsRestjson1_deserializeErrorMethodNotAllowedException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("PayloadTooLargeException", errorCode): + return awsRestjson1_deserializeErrorPayloadTooLargeException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -12975,7 +13320,7 @@ func awsRestjson1_deserializeOpErrorListTagsForResource(response *smithyhttp.Res } } -func awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetVoiceTemplateOutput(v **GetVoiceTemplateOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -12988,17 +13333,17 @@ func awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsFor return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListTagsForResourceOutput + var sv *GetVoiceTemplateOutput if *v == nil { - sv = &ListTagsForResourceOutput{} + sv = &GetVoiceTemplateOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "TagsModel": - if err := awsRestjson1_deserializeDocumentTagsModel(&sv.TagsModel, value); err != nil { + case "VoiceTemplateResponse": + if err := awsRestjson1_deserializeDocumentVoiceTemplateResponse(&sv.VoiceTemplateResponse, value); err != nil { return err } @@ -13011,14 +13356,14 @@ func awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsFor return nil } -type awsRestjson1_deserializeOpListTemplates struct { +type awsRestjson1_deserializeOpListJourneys struct { } -func (*awsRestjson1_deserializeOpListTemplates) ID() string { +func (*awsRestjson1_deserializeOpListJourneys) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListTemplates) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListJourneys) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13032,9 +13377,9 @@ func (m *awsRestjson1_deserializeOpListTemplates) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListTemplates(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListJourneys(response, &metadata) } - output := &ListTemplatesOutput{} + output := &ListJourneysOutput{} out.Result = output var buff [1024]byte @@ -13055,7 +13400,7 @@ func (m *awsRestjson1_deserializeOpListTemplates) HandleDeserialize(ctx context. return out, metadata, err } - err = awsRestjson1_deserializeDocumentTemplatesResponse(&output.TemplatesResponse, shape) + err = awsRestjson1_deserializeDocumentJourneysResponse(&output.JourneysResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13068,7 +13413,7 @@ func (m *awsRestjson1_deserializeOpListTemplates) HandleDeserialize(ctx context. 
return out, metadata, err } -func awsRestjson1_deserializeOpErrorListTemplates(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListJourneys(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13121,6 +13466,12 @@ func awsRestjson1_deserializeOpErrorListTemplates(response *smithyhttp.Response, case strings.EqualFold("MethodNotAllowedException", errorCode): return awsRestjson1_deserializeErrorMethodNotAllowedException(response, errorBody) + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("PayloadTooLargeException", errorCode): + return awsRestjson1_deserializeErrorPayloadTooLargeException(response, errorBody) + case strings.EqualFold("TooManyRequestsException", errorCode): return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) @@ -13134,7 +13485,7 @@ func awsRestjson1_deserializeOpErrorListTemplates(response *smithyhttp.Response, } } -func awsRestjson1_deserializeOpDocumentListTemplatesOutput(v **ListTemplatesOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListJourneysOutput(v **ListJourneysOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -13147,17 +13498,17 @@ func awsRestjson1_deserializeOpDocumentListTemplatesOutput(v **ListTemplatesOutp return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListTemplatesOutput + var sv *ListJourneysOutput if *v == nil { - sv = &ListTemplatesOutput{} + sv = &ListJourneysOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "TemplatesResponse": - if err := awsRestjson1_deserializeDocumentTemplatesResponse(&sv.TemplatesResponse, value); err != nil { + case "JourneysResponse": + if err := awsRestjson1_deserializeDocumentJourneysResponse(&sv.JourneysResponse, value); err != nil { return err } @@ -13170,14 +13521,14 @@ func awsRestjson1_deserializeOpDocumentListTemplatesOutput(v **ListTemplatesOutp return nil } -type awsRestjson1_deserializeOpListTemplateVersions struct { +type awsRestjson1_deserializeOpListTagsForResource struct { } -func (*awsRestjson1_deserializeOpListTemplateVersions) ID() string { +func (*awsRestjson1_deserializeOpListTagsForResource) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListTemplateVersions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13191,9 +13542,9 @@ func (m *awsRestjson1_deserializeOpListTemplateVersions) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListTemplateVersions(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListTagsForResource(response, &metadata) } - output := &ListTemplateVersionsOutput{} + output := &ListTagsForResourceOutput{} out.Result = output var buff [1024]byte @@ 
-13214,7 +13565,7 @@ func (m *awsRestjson1_deserializeOpListTemplateVersions) HandleDeserialize(ctx c return out, metadata, err } - err = awsRestjson1_deserializeDocumentTemplateVersionsResponse(&output.TemplateVersionsResponse, shape) + err = awsRestjson1_deserializeDocumentTagsModel(&output.TagsModel, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13227,7 +13578,7 @@ func (m *awsRestjson1_deserializeOpListTemplateVersions) HandleDeserialize(ctx c return out, metadata, err } -func awsRestjson1_deserializeOpErrorListTemplateVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13268,27 +13619,6 @@ func awsRestjson1_deserializeOpErrorListTemplateVersions(response *smithyhttp.Re } switch { - case strings.EqualFold("BadRequestException", errorCode): - return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) - - case strings.EqualFold("ForbiddenException", errorCode): - return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) - - case strings.EqualFold("InternalServerErrorException", errorCode): - return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) - - case strings.EqualFold("MethodNotAllowedException", errorCode): - return awsRestjson1_deserializeErrorMethodNotAllowedException(response, errorBody) - - case strings.EqualFold("NotFoundException", errorCode): - return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) - - case strings.EqualFold("PayloadTooLargeException", errorCode): - return awsRestjson1_deserializeErrorPayloadTooLargeException(response, errorBody) - - case strings.EqualFold("TooManyRequestsException", errorCode): - return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) - default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -13299,7 +13629,7 @@ func awsRestjson1_deserializeOpErrorListTemplateVersions(response *smithyhttp.Re } } -func awsRestjson1_deserializeOpDocumentListTemplateVersionsOutput(v **ListTemplateVersionsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -13312,17 +13642,17 @@ func awsRestjson1_deserializeOpDocumentListTemplateVersionsOutput(v **ListTempla return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListTemplateVersionsOutput + var sv *ListTagsForResourceOutput if *v == nil { - sv = &ListTemplateVersionsOutput{} + sv = &ListTagsForResourceOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "TemplateVersionsResponse": - if err := awsRestjson1_deserializeDocumentTemplateVersionsResponse(&sv.TemplateVersionsResponse, value); err != nil { + case "TagsModel": + if err := awsRestjson1_deserializeDocumentTagsModel(&sv.TagsModel, value); err != nil { return err } @@ -13335,14 +13665,14 @@ func awsRestjson1_deserializeOpDocumentListTemplateVersionsOutput(v **ListTempla return nil } -type awsRestjson1_deserializeOpPhoneNumberValidate struct { +type awsRestjson1_deserializeOpListTemplates struct { } -func 
(*awsRestjson1_deserializeOpPhoneNumberValidate) ID() string { +func (*awsRestjson1_deserializeOpListTemplates) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpPhoneNumberValidate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListTemplates) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13356,9 +13686,9 @@ func (m *awsRestjson1_deserializeOpPhoneNumberValidate) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorPhoneNumberValidate(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListTemplates(response, &metadata) } - output := &PhoneNumberValidateOutput{} + output := &ListTemplatesOutput{} out.Result = output var buff [1024]byte @@ -13379,7 +13709,7 @@ func (m *awsRestjson1_deserializeOpPhoneNumberValidate) HandleDeserialize(ctx co return out, metadata, err } - err = awsRestjson1_deserializeDocumentNumberValidateResponse(&output.NumberValidateResponse, shape) + err = awsRestjson1_deserializeDocumentTemplatesResponse(&output.TemplatesResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13392,7 +13722,7 @@ func (m *awsRestjson1_deserializeOpPhoneNumberValidate) HandleDeserialize(ctx co return out, metadata, err } -func awsRestjson1_deserializeOpErrorPhoneNumberValidate(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListTemplates(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13445,12 +13775,6 @@ func awsRestjson1_deserializeOpErrorPhoneNumberValidate(response *smithyhttp.Res case strings.EqualFold("MethodNotAllowedException", errorCode): return awsRestjson1_deserializeErrorMethodNotAllowedException(response, errorBody) - case strings.EqualFold("NotFoundException", errorCode): - return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) - - case strings.EqualFold("PayloadTooLargeException", errorCode): - return awsRestjson1_deserializeErrorPayloadTooLargeException(response, errorBody) - case strings.EqualFold("TooManyRequestsException", errorCode): return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) @@ -13464,7 +13788,7 @@ func awsRestjson1_deserializeOpErrorPhoneNumberValidate(response *smithyhttp.Res } } -func awsRestjson1_deserializeOpDocumentPhoneNumberValidateOutput(v **PhoneNumberValidateOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListTemplatesOutput(v **ListTemplatesOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -13477,17 +13801,17 @@ func awsRestjson1_deserializeOpDocumentPhoneNumberValidateOutput(v **PhoneNumber return fmt.Errorf("unexpected JSON type %v", value) } - var sv *PhoneNumberValidateOutput + var sv *ListTemplatesOutput if *v == nil { - sv = &PhoneNumberValidateOutput{} + sv = &ListTemplatesOutput{} } else { sv = *v } for key, value := range shape { switch key { - case 
"NumberValidateResponse": - if err := awsRestjson1_deserializeDocumentNumberValidateResponse(&sv.NumberValidateResponse, value); err != nil { + case "TemplatesResponse": + if err := awsRestjson1_deserializeDocumentTemplatesResponse(&sv.TemplatesResponse, value); err != nil { return err } @@ -13500,14 +13824,14 @@ func awsRestjson1_deserializeOpDocumentPhoneNumberValidateOutput(v **PhoneNumber return nil } -type awsRestjson1_deserializeOpPutEvents struct { +type awsRestjson1_deserializeOpListTemplateVersions struct { } -func (*awsRestjson1_deserializeOpPutEvents) ID() string { +func (*awsRestjson1_deserializeOpListTemplateVersions) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpPutEvents) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListTemplateVersions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13521,9 +13845,9 @@ func (m *awsRestjson1_deserializeOpPutEvents) HandleDeserialize(ctx context.Cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorPutEvents(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListTemplateVersions(response, &metadata) } - output := &PutEventsOutput{} + output := &ListTemplateVersionsOutput{} out.Result = output var buff [1024]byte @@ -13544,7 +13868,7 @@ func (m *awsRestjson1_deserializeOpPutEvents) HandleDeserialize(ctx context.Cont return out, metadata, err } - err = awsRestjson1_deserializeDocumentEventsResponse(&output.EventsResponse, shape) + err = awsRestjson1_deserializeDocumentTemplateVersionsResponse(&output.TemplateVersionsResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13557,7 +13881,7 @@ func (m *awsRestjson1_deserializeOpPutEvents) HandleDeserialize(ctx context.Cont return out, metadata, err } -func awsRestjson1_deserializeOpErrorPutEvents(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListTemplateVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13629,7 +13953,7 @@ func awsRestjson1_deserializeOpErrorPutEvents(response *smithyhttp.Response, met } } -func awsRestjson1_deserializeOpDocumentPutEventsOutput(v **PutEventsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListTemplateVersionsOutput(v **ListTemplateVersionsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -13642,17 +13966,17 @@ func awsRestjson1_deserializeOpDocumentPutEventsOutput(v **PutEventsOutput, valu return fmt.Errorf("unexpected JSON type %v", value) } - var sv *PutEventsOutput + var sv *ListTemplateVersionsOutput if *v == nil { - sv = &PutEventsOutput{} + sv = &ListTemplateVersionsOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "EventsResponse": - if err := awsRestjson1_deserializeDocumentEventsResponse(&sv.EventsResponse, value); err != nil { + case "TemplateVersionsResponse": + if err := 
awsRestjson1_deserializeDocumentTemplateVersionsResponse(&sv.TemplateVersionsResponse, value); err != nil { return err } @@ -13665,14 +13989,14 @@ func awsRestjson1_deserializeOpDocumentPutEventsOutput(v **PutEventsOutput, valu return nil } -type awsRestjson1_deserializeOpPutEventStream struct { +type awsRestjson1_deserializeOpPhoneNumberValidate struct { } -func (*awsRestjson1_deserializeOpPutEventStream) ID() string { +func (*awsRestjson1_deserializeOpPhoneNumberValidate) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpPutEventStream) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpPhoneNumberValidate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13686,9 +14010,9 @@ func (m *awsRestjson1_deserializeOpPutEventStream) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorPutEventStream(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorPhoneNumberValidate(response, &metadata) } - output := &PutEventStreamOutput{} + output := &PhoneNumberValidateOutput{} out.Result = output var buff [1024]byte @@ -13709,7 +14033,7 @@ func (m *awsRestjson1_deserializeOpPutEventStream) HandleDeserialize(ctx context return out, metadata, err } - err = awsRestjson1_deserializeDocumentEventStream(&output.EventStream, shape) + err = awsRestjson1_deserializeDocumentNumberValidateResponse(&output.NumberValidateResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13722,7 +14046,7 @@ func (m *awsRestjson1_deserializeOpPutEventStream) HandleDeserialize(ctx context return out, metadata, err } -func awsRestjson1_deserializeOpErrorPutEventStream(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorPhoneNumberValidate(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13794,7 +14118,7 @@ func awsRestjson1_deserializeOpErrorPutEventStream(response *smithyhttp.Response } } -func awsRestjson1_deserializeOpDocumentPutEventStreamOutput(v **PutEventStreamOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentPhoneNumberValidateOutput(v **PhoneNumberValidateOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -13807,17 +14131,17 @@ func awsRestjson1_deserializeOpDocumentPutEventStreamOutput(v **PutEventStreamOu return fmt.Errorf("unexpected JSON type %v", value) } - var sv *PutEventStreamOutput + var sv *PhoneNumberValidateOutput if *v == nil { - sv = &PutEventStreamOutput{} + sv = &PhoneNumberValidateOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "EventStream": - if err := awsRestjson1_deserializeDocumentEventStream(&sv.EventStream, value); err != nil { + case "NumberValidateResponse": + if err := awsRestjson1_deserializeDocumentNumberValidateResponse(&sv.NumberValidateResponse, value); err != nil { return err } @@ -13830,14 +14154,14 @@ func 
awsRestjson1_deserializeOpDocumentPutEventStreamOutput(v **PutEventStreamOu return nil } -type awsRestjson1_deserializeOpRemoveAttributes struct { +type awsRestjson1_deserializeOpPutEvents struct { } -func (*awsRestjson1_deserializeOpRemoveAttributes) ID() string { +func (*awsRestjson1_deserializeOpPutEvents) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpRemoveAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpPutEvents) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13851,9 +14175,9 @@ func (m *awsRestjson1_deserializeOpRemoveAttributes) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorRemoveAttributes(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorPutEvents(response, &metadata) } - output := &RemoveAttributesOutput{} + output := &PutEventsOutput{} out.Result = output var buff [1024]byte @@ -13874,7 +14198,7 @@ func (m *awsRestjson1_deserializeOpRemoveAttributes) HandleDeserialize(ctx conte return out, metadata, err } - err = awsRestjson1_deserializeDocumentAttributesResource(&output.AttributesResource, shape) + err = awsRestjson1_deserializeDocumentEventsResponse(&output.EventsResponse, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13887,7 +14211,7 @@ func (m *awsRestjson1_deserializeOpRemoveAttributes) HandleDeserialize(ctx conte return out, metadata, err } -func awsRestjson1_deserializeOpErrorRemoveAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorPutEvents(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13959,7 +14283,7 @@ func awsRestjson1_deserializeOpErrorRemoveAttributes(response *smithyhttp.Respon } } -func awsRestjson1_deserializeOpDocumentRemoveAttributesOutput(v **RemoveAttributesOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentPutEventsOutput(v **PutEventsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -13972,17 +14296,17 @@ func awsRestjson1_deserializeOpDocumentRemoveAttributesOutput(v **RemoveAttribut return fmt.Errorf("unexpected JSON type %v", value) } - var sv *RemoveAttributesOutput + var sv *PutEventsOutput if *v == nil { - sv = &RemoveAttributesOutput{} + sv = &PutEventsOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "AttributesResource": - if err := awsRestjson1_deserializeDocumentAttributesResource(&sv.AttributesResource, value); err != nil { + case "EventsResponse": + if err := awsRestjson1_deserializeDocumentEventsResponse(&sv.EventsResponse, value); err != nil { return err } @@ -13995,14 +14319,14 @@ func awsRestjson1_deserializeOpDocumentRemoveAttributesOutput(v **RemoveAttribut return nil } -type awsRestjson1_deserializeOpSendMessages struct { +type awsRestjson1_deserializeOpPutEventStream struct { } -func (*awsRestjson1_deserializeOpSendMessages) ID() string { +func 
(*awsRestjson1_deserializeOpPutEventStream) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpSendMessages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpPutEventStream) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -14016,9 +14340,9 @@ func (m *awsRestjson1_deserializeOpSendMessages) HandleDeserialize(ctx context.C } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorSendMessages(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorPutEventStream(response, &metadata) } - output := &SendMessagesOutput{} + output := &PutEventStreamOutput{} out.Result = output var buff [1024]byte @@ -14039,7 +14363,7 @@ func (m *awsRestjson1_deserializeOpSendMessages) HandleDeserialize(ctx context.C return out, metadata, err } - err = awsRestjson1_deserializeDocumentMessageResponse(&output.MessageResponse, shape) + err = awsRestjson1_deserializeDocumentEventStream(&output.EventStream, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -14052,7 +14376,7 @@ func (m *awsRestjson1_deserializeOpSendMessages) HandleDeserialize(ctx context.C return out, metadata, err } -func awsRestjson1_deserializeOpErrorSendMessages(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorPutEventStream(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -14124,7 +14448,7 @@ func awsRestjson1_deserializeOpErrorSendMessages(response *smithyhttp.Response, } } -func awsRestjson1_deserializeOpDocumentSendMessagesOutput(v **SendMessagesOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentPutEventStreamOutput(v **PutEventStreamOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -14137,17 +14461,17 @@ func awsRestjson1_deserializeOpDocumentSendMessagesOutput(v **SendMessagesOutput return fmt.Errorf("unexpected JSON type %v", value) } - var sv *SendMessagesOutput + var sv *PutEventStreamOutput if *v == nil { - sv = &SendMessagesOutput{} + sv = &PutEventStreamOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "MessageResponse": - if err := awsRestjson1_deserializeDocumentMessageResponse(&sv.MessageResponse, value); err != nil { + case "EventStream": + if err := awsRestjson1_deserializeDocumentEventStream(&sv.EventStream, value); err != nil { return err } @@ -14160,14 +14484,14 @@ func awsRestjson1_deserializeOpDocumentSendMessagesOutput(v **SendMessagesOutput return nil } -type awsRestjson1_deserializeOpSendUsersMessages struct { +type awsRestjson1_deserializeOpRemoveAttributes struct { } -func (*awsRestjson1_deserializeOpSendUsersMessages) ID() string { +func (*awsRestjson1_deserializeOpRemoveAttributes) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpSendUsersMessages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m 
*awsRestjson1_deserializeOpRemoveAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -14181,9 +14505,9 @@ func (m *awsRestjson1_deserializeOpSendUsersMessages) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorSendUsersMessages(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorRemoveAttributes(response, &metadata) } - output := &SendUsersMessagesOutput{} + output := &RemoveAttributesOutput{} out.Result = output var buff [1024]byte @@ -14204,7 +14528,7 @@ func (m *awsRestjson1_deserializeOpSendUsersMessages) HandleDeserialize(ctx cont return out, metadata, err } - err = awsRestjson1_deserializeDocumentSendUsersMessageResponse(&output.SendUsersMessageResponse, shape) + err = awsRestjson1_deserializeDocumentAttributesResource(&output.AttributesResource, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -14217,7 +14541,7 @@ func (m *awsRestjson1_deserializeOpSendUsersMessages) HandleDeserialize(ctx cont return out, metadata, err } -func awsRestjson1_deserializeOpErrorSendUsersMessages(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorRemoveAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -14289,7 +14613,7 @@ func awsRestjson1_deserializeOpErrorSendUsersMessages(response *smithyhttp.Respo } } -func awsRestjson1_deserializeOpDocumentSendUsersMessagesOutput(v **SendUsersMessagesOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentRemoveAttributesOutput(v **RemoveAttributesOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -14302,17 +14626,17 @@ func awsRestjson1_deserializeOpDocumentSendUsersMessagesOutput(v **SendUsersMess return fmt.Errorf("unexpected JSON type %v", value) } - var sv *SendUsersMessagesOutput + var sv *RemoveAttributesOutput if *v == nil { - sv = &SendUsersMessagesOutput{} + sv = &RemoveAttributesOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "SendUsersMessageResponse": - if err := awsRestjson1_deserializeDocumentSendUsersMessageResponse(&sv.SendUsersMessageResponse, value); err != nil { + case "AttributesResource": + if err := awsRestjson1_deserializeDocumentAttributesResource(&sv.AttributesResource, value); err != nil { return err } @@ -14325,14 +14649,14 @@ func awsRestjson1_deserializeOpDocumentSendUsersMessagesOutput(v **SendUsersMess return nil } -type awsRestjson1_deserializeOpTagResource struct { +type awsRestjson1_deserializeOpSendMessages struct { } -func (*awsRestjson1_deserializeOpTagResource) ID() string { +func (*awsRestjson1_deserializeOpSendMessages) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpSendMessages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -14346,21 +14670,43 @@ func (m *awsRestjson1_deserializeOpTagResource) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorTagResource(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorSendMessages(response, &metadata) } - output := &TagResourceOutput{} + output := &SendMessagesOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeDocumentMessageResponse(&output.MessageResponse, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), } } return out, metadata, err } -func awsRestjson1_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorSendMessages(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -14401,6 +14747,27 @@ func awsRestjson1_deserializeOpErrorTagResource(response *smithyhttp.Response, m } switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("MethodNotAllowedException", errorCode): + return awsRestjson1_deserializeErrorMethodNotAllowedException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("PayloadTooLargeException", errorCode): + return awsRestjson1_deserializeErrorPayloadTooLargeException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -14411,14 +14778,50 @@ func awsRestjson1_deserializeOpErrorTagResource(response *smithyhttp.Response, m } } -type awsRestjson1_deserializeOpUntagResource struct { +func awsRestjson1_deserializeOpDocumentSendMessagesOutput(v **SendMessagesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if 
value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *SendMessagesOutput + if *v == nil { + sv = &SendMessagesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MessageResponse": + if err := awsRestjson1_deserializeDocumentMessageResponse(&sv.MessageResponse, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil } -func (*awsRestjson1_deserializeOpUntagResource) ID() string { +type awsRestjson1_deserializeOpSendUsersMessages struct { +} + +func (*awsRestjson1_deserializeOpSendUsersMessages) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpSendUsersMessages) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -14432,21 +14835,43 @@ func (m *awsRestjson1_deserializeOpUntagResource) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorUntagResource(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorSendUsersMessages(response, &metadata) } - output := &UntagResourceOutput{} + output := &SendUsersMessagesOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeDocumentSendUsersMessageResponse(&output.SendUsersMessageResponse, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), } } return out, metadata, err } -func awsRestjson1_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorSendUsersMessages(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -14487,6 +14912,27 @@ func awsRestjson1_deserializeOpErrorUntagResource(response *smithyhttp.Response, } switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", 
errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("MethodNotAllowedException", errorCode): + return awsRestjson1_deserializeErrorMethodNotAllowedException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("PayloadTooLargeException", errorCode): + return awsRestjson1_deserializeErrorPayloadTooLargeException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -14497,13 +14943,221 @@ func awsRestjson1_deserializeOpErrorUntagResource(response *smithyhttp.Response, } } -type awsRestjson1_deserializeOpUpdateAdmChannel struct { -} - -func (*awsRestjson1_deserializeOpUpdateAdmChannel) ID() string { - return "OperationDeserializer" -} - +func awsRestjson1_deserializeOpDocumentSendUsersMessagesOutput(v **SendUsersMessagesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *SendUsersMessagesOutput + if *v == nil { + sv = &SendUsersMessagesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "SendUsersMessageResponse": + if err := awsRestjson1_deserializeDocumentSendUsersMessageResponse(&sv.SendUsersMessageResponse, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpTagResource struct { +} + +func (*awsRestjson1_deserializeOpTagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorTagResource(response, &metadata) + } + output := &TagResourceOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := 
smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestjson1_deserializeOpUntagResource struct { +} + +func (*awsRestjson1_deserializeOpUntagResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorUntagResource(response, &metadata) + } + output := &UntagResourceOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsRestjson1_deserializeOpUpdateAdmChannel struct { +} + +func (*awsRestjson1_deserializeOpUpdateAdmChannel) ID() string { + return "OperationDeserializer" +} + func (m *awsRestjson1_deserializeOpUpdateAdmChannel) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out 
middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { @@ -16642,14 +17296,14 @@ func awsRestjson1_deserializeOpDocumentUpdateGcmChannelOutput(v **UpdateGcmChann return nil } -type awsRestjson1_deserializeOpUpdateJourney struct { +type awsRestjson1_deserializeOpUpdateInAppTemplate struct { } -func (*awsRestjson1_deserializeOpUpdateJourney) ID() string { +func (*awsRestjson1_deserializeOpUpdateInAppTemplate) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpUpdateJourney) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpUpdateInAppTemplate) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -16663,9 +17317,9 @@ func (m *awsRestjson1_deserializeOpUpdateJourney) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorUpdateJourney(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorUpdateInAppTemplate(response, &metadata) } - output := &UpdateJourneyOutput{} + output := &UpdateInAppTemplateOutput{} out.Result = output var buff [1024]byte @@ -16686,7 +17340,7 @@ func (m *awsRestjson1_deserializeOpUpdateJourney) HandleDeserialize(ctx context. return out, metadata, err } - err = awsRestjson1_deserializeDocumentJourneyResponse(&output.JourneyResponse, shape) + err = awsRestjson1_deserializeDocumentMessageBody(&output.MessageBody, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -16699,7 +17353,7 @@ func (m *awsRestjson1_deserializeOpUpdateJourney) HandleDeserialize(ctx context. 
return out, metadata, err } -func awsRestjson1_deserializeOpErrorUpdateJourney(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorUpdateInAppTemplate(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -16743,9 +17397,6 @@ func awsRestjson1_deserializeOpErrorUpdateJourney(response *smithyhttp.Response, case strings.EqualFold("BadRequestException", errorCode): return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) - case strings.EqualFold("ConflictException", errorCode): - return awsRestjson1_deserializeErrorConflictException(response, errorBody) - case strings.EqualFold("ForbiddenException", errorCode): return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) @@ -16774,7 +17425,7 @@ func awsRestjson1_deserializeOpErrorUpdateJourney(response *smithyhttp.Response, } } -func awsRestjson1_deserializeOpDocumentUpdateJourneyOutput(v **UpdateJourneyOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentUpdateInAppTemplateOutput(v **UpdateInAppTemplateOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -16787,17 +17438,17 @@ func awsRestjson1_deserializeOpDocumentUpdateJourneyOutput(v **UpdateJourneyOutp return fmt.Errorf("unexpected JSON type %v", value) } - var sv *UpdateJourneyOutput + var sv *UpdateInAppTemplateOutput if *v == nil { - sv = &UpdateJourneyOutput{} + sv = &UpdateInAppTemplateOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "JourneyResponse": - if err := awsRestjson1_deserializeDocumentJourneyResponse(&sv.JourneyResponse, value); err != nil { + case "MessageBody": + if err := awsRestjson1_deserializeDocumentMessageBody(&sv.MessageBody, value); err != nil { return err } @@ -16810,14 +17461,14 @@ func awsRestjson1_deserializeOpDocumentUpdateJourneyOutput(v **UpdateJourneyOutp return nil } -type awsRestjson1_deserializeOpUpdateJourneyState struct { +type awsRestjson1_deserializeOpUpdateJourney struct { } -func (*awsRestjson1_deserializeOpUpdateJourneyState) ID() string { +func (*awsRestjson1_deserializeOpUpdateJourney) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpUpdateJourneyState) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpUpdateJourney) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -16831,9 +17482,9 @@ func (m *awsRestjson1_deserializeOpUpdateJourneyState) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorUpdateJourneyState(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorUpdateJourney(response, &metadata) } - output := &UpdateJourneyStateOutput{} + output := &UpdateJourneyOutput{} out.Result = output var buff [1024]byte @@ -16867,7 +17518,7 @@ func (m *awsRestjson1_deserializeOpUpdateJourneyState) HandleDeserialize(ctx con return out, metadata, err } -func awsRestjson1_deserializeOpErrorUpdateJourneyState(response 
*smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorUpdateJourney(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -16911,6 +17562,9 @@ func awsRestjson1_deserializeOpErrorUpdateJourneyState(response *smithyhttp.Resp case strings.EqualFold("BadRequestException", errorCode): return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + case strings.EqualFold("ForbiddenException", errorCode): return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) @@ -16939,7 +17593,7 @@ func awsRestjson1_deserializeOpErrorUpdateJourneyState(response *smithyhttp.Resp } } -func awsRestjson1_deserializeOpDocumentUpdateJourneyStateOutput(v **UpdateJourneyStateOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentUpdateJourneyOutput(v **UpdateJourneyOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -16952,9 +17606,9 @@ func awsRestjson1_deserializeOpDocumentUpdateJourneyStateOutput(v **UpdateJourne return fmt.Errorf("unexpected JSON type %v", value) } - var sv *UpdateJourneyStateOutput + var sv *UpdateJourneyOutput if *v == nil { - sv = &UpdateJourneyStateOutput{} + sv = &UpdateJourneyOutput{} } else { sv = *v } @@ -16975,7 +17629,172 @@ func awsRestjson1_deserializeOpDocumentUpdateJourneyStateOutput(v **UpdateJourne return nil } -type awsRestjson1_deserializeOpUpdatePushTemplate struct { +type awsRestjson1_deserializeOpUpdateJourneyState struct { +} + +func (*awsRestjson1_deserializeOpUpdateJourneyState) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpUpdateJourneyState) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorUpdateJourneyState(response, &metadata) + } + output := &UpdateJourneyStateOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeDocumentJourneyResponse(&output.JourneyResponse, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: 
snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorUpdateJourneyState(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("MethodNotAllowedException", errorCode): + return awsRestjson1_deserializeErrorMethodNotAllowedException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("PayloadTooLargeException", errorCode): + return awsRestjson1_deserializeErrorPayloadTooLargeException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentUpdateJourneyStateOutput(v **UpdateJourneyStateOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateJourneyStateOutput + if *v == nil { + sv = &UpdateJourneyStateOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "JourneyResponse": + if err := awsRestjson1_deserializeDocumentJourneyResponse(&sv.JourneyResponse, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpUpdatePushTemplate struct { } func (*awsRestjson1_deserializeOpUpdatePushTemplate) ID() string { @@ -20667,6 +21486,65 @@ func awsRestjson1_deserializeDocumentCampaignHook(v **types.CampaignHook, value return nil } +func awsRestjson1_deserializeDocumentCampaignInAppMessage(v **types.CampaignInAppMessage, value interface{}) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.CampaignInAppMessage + if *v == nil { + sv = &types.CampaignInAppMessage{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Body": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Body = ptr.String(jtv) + } + + case "Content": + if err := awsRestjson1_deserializeDocumentListOfInAppMessageContent(&sv.Content, value); err != nil { + return err + } + + case "CustomConfig": + if err := awsRestjson1_deserializeDocumentMapOf__string(&sv.CustomConfig, value); err != nil { + return err + } + + case "Layout": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Layout to be of type string, got %T instead", value) + } + sv.Layout = types.Layout(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentCampaignLimits(v **types.CampaignLimits, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -20728,6 +21606,19 @@ func awsRestjson1_deserializeDocumentCampaignLimits(v **types.CampaignLimits, va sv.MessagesPerSecond = int32(i64) } + case "Session": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Session = int32(i64) + } + case "Total": if value != nil { jtv, ok := value.(json.Number) @@ -20887,6 +21778,19 @@ func awsRestjson1_deserializeDocumentCampaignResponse(v **types.CampaignResponse sv.Name = ptr.String(jtv) } + case "Priority": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Priority = int32(i64) + } + case "Schedule": if err := awsRestjson1_deserializeDocumentSchedule(&sv.Schedule, value); err != nil { return err @@ -21624,7 +22528,7 @@ func awsRestjson1_deserializeDocumentCustomMessageActivity(v **types.CustomMessa return nil } -func awsRestjson1_deserializeDocumentDefaultPushNotificationTemplate(v **types.DefaultPushNotificationTemplate, value interface{}) error { +func awsRestjson1_deserializeDocumentDefaultButtonConfiguration(v **types.DefaultButtonConfiguration, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -21637,40 +22541,129 @@ func awsRestjson1_deserializeDocumentDefaultPushNotificationTemplate(v **types.D return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.DefaultPushNotificationTemplate + var sv *types.DefaultButtonConfiguration if *v == nil { - sv = &types.DefaultPushNotificationTemplate{} + sv = &types.DefaultButtonConfiguration{} } else { sv = *v } for key, value := range shape { switch key { - case "Action": + case "BackgroundColor": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected Action to be of type string, got %T instead", value) + return fmt.Errorf("expected __string to be of type string, got %T instead", value) } - sv.Action = types.Action(jtv) + sv.BackgroundColor = ptr.String(jtv) } - case "Body": + case "BorderRadius": if value != nil { - jtv, ok := 
value.(string) + jtv, ok := value.(json.Number) if !ok { - return fmt.Errorf("expected __string to be of type string, got %T instead", value) + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) } - sv.Body = ptr.String(jtv) + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.BorderRadius = int32(i64) } - case "Sound": + case "ButtonAction": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected __string to be of type string, got %T instead", value) + return fmt.Errorf("expected ButtonAction to be of type string, got %T instead", value) } - sv.Sound = ptr.String(jtv) + sv.ButtonAction = types.ButtonAction(jtv) + } + + case "Link": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Link = ptr.String(jtv) + } + + case "Text": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Text = ptr.String(jtv) + } + + case "TextColor": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.TextColor = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentDefaultPushNotificationTemplate(v **types.DefaultPushNotificationTemplate, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DefaultPushNotificationTemplate + if *v == nil { + sv = &types.DefaultPushNotificationTemplate{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Action": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Action to be of type string, got %T instead", value) + } + sv.Action = types.Action(jtv) + } + + case "Body": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Body = ptr.String(jtv) + } + + case "Sound": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Sound = ptr.String(jtv) } case "Title": @@ -23907,120 +24900,721 @@ func awsRestjson1_deserializeDocumentImportJobResponse(v **types.ImportJobRespon sv.FailedPieces = int32(i64) } - case "Failures": - if err := awsRestjson1_deserializeDocumentListOf__string(&sv.Failures, value); err != nil { + case "Failures": + if err := awsRestjson1_deserializeDocumentListOf__string(&sv.Failures, value); err != nil { + return err + } + + case "Id": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Id = ptr.String(jtv) + } + + case "JobStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected JobStatus to be of type string, got %T instead", value) + } + sv.JobStatus = types.JobStatus(jtv) + } + + case "TotalFailures": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + 
} + sv.TotalFailures = int32(i64) + } + + case "TotalPieces": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TotalPieces = int32(i64) + } + + case "TotalProcessed": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TotalProcessed = int32(i64) + } + + case "Type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Type = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentImportJobsResponse(v **types.ImportJobsResponse, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ImportJobsResponse + if *v == nil { + sv = &types.ImportJobsResponse{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Item": + if err := awsRestjson1_deserializeDocumentListOfImportJobResponse(&sv.Item, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInAppCampaignSchedule(v **types.InAppCampaignSchedule, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InAppCampaignSchedule + if *v == nil { + sv = &types.InAppCampaignSchedule{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "EndDate": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.EndDate = ptr.String(jtv) + } + + case "EventFilter": + if err := awsRestjson1_deserializeDocumentCampaignEventFilter(&sv.EventFilter, value); err != nil { + return err + } + + case "QuietTime": + if err := awsRestjson1_deserializeDocumentQuietTime(&sv.QuietTime, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInAppMessage(v **types.InAppMessage, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InAppMessage + if *v == nil { + sv = &types.InAppMessage{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Content": + if err := awsRestjson1_deserializeDocumentListOfInAppMessageContent(&sv.Content, value); err != nil { + return err + } + + case "CustomConfig": + if err := awsRestjson1_deserializeDocumentMapOf__string(&sv.CustomConfig, 
value); err != nil { + return err + } + + case "Layout": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Layout to be of type string, got %T instead", value) + } + sv.Layout = types.Layout(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInAppMessageBodyConfig(v **types.InAppMessageBodyConfig, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InAppMessageBodyConfig + if *v == nil { + sv = &types.InAppMessageBodyConfig{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Alignment": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Alignment to be of type string, got %T instead", value) + } + sv.Alignment = types.Alignment(jtv) + } + + case "Body": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Body = ptr.String(jtv) + } + + case "TextColor": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.TextColor = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInAppMessageButton(v **types.InAppMessageButton, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InAppMessageButton + if *v == nil { + sv = &types.InAppMessageButton{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Android": + if err := awsRestjson1_deserializeDocumentOverrideButtonConfiguration(&sv.Android, value); err != nil { + return err + } + + case "DefaultConfig": + if err := awsRestjson1_deserializeDocumentDefaultButtonConfiguration(&sv.DefaultConfig, value); err != nil { + return err + } + + case "IOS": + if err := awsRestjson1_deserializeDocumentOverrideButtonConfiguration(&sv.IOS, value); err != nil { + return err + } + + case "Web": + if err := awsRestjson1_deserializeDocumentOverrideButtonConfiguration(&sv.Web, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInAppMessageCampaign(v **types.InAppMessageCampaign, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InAppMessageCampaign + if *v == nil { + sv = &types.InAppMessageCampaign{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CampaignId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.CampaignId = ptr.String(jtv) + } + + case "DailyCap": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) + } + 
i64, err := jtv.Int64() + if err != nil { + return err + } + sv.DailyCap = int32(i64) + } + + case "InAppMessage": + if err := awsRestjson1_deserializeDocumentInAppMessage(&sv.InAppMessage, value); err != nil { + return err + } + + case "Priority": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Priority = int32(i64) + } + + case "Schedule": + if err := awsRestjson1_deserializeDocumentInAppCampaignSchedule(&sv.Schedule, value); err != nil { + return err + } + + case "SessionCap": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.SessionCap = int32(i64) + } + + case "TotalCap": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.TotalCap = int32(i64) + } + + case "TreatmentId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.TreatmentId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInAppMessageContent(v **types.InAppMessageContent, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InAppMessageContent + if *v == nil { + sv = &types.InAppMessageContent{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "BackgroundColor": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.BackgroundColor = ptr.String(jtv) + } + + case "BodyConfig": + if err := awsRestjson1_deserializeDocumentInAppMessageBodyConfig(&sv.BodyConfig, value); err != nil { + return err + } + + case "HeaderConfig": + if err := awsRestjson1_deserializeDocumentInAppMessageHeaderConfig(&sv.HeaderConfig, value); err != nil { + return err + } + + case "ImageUrl": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.ImageUrl = ptr.String(jtv) + } + + case "PrimaryBtn": + if err := awsRestjson1_deserializeDocumentInAppMessageButton(&sv.PrimaryBtn, value); err != nil { + return err + } + + case "SecondaryBtn": + if err := awsRestjson1_deserializeDocumentInAppMessageButton(&sv.SecondaryBtn, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInAppMessageHeaderConfig(v **types.InAppMessageHeaderConfig, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InAppMessageHeaderConfig + if *v == nil { + sv = &types.InAppMessageHeaderConfig{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { 
+ case "Alignment": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Alignment to be of type string, got %T instead", value) + } + sv.Alignment = types.Alignment(jtv) + } + + case "Header": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Header = ptr.String(jtv) + } + + case "TextColor": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.TextColor = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInAppMessagesResponse(v **types.InAppMessagesResponse, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InAppMessagesResponse + if *v == nil { + sv = &types.InAppMessagesResponse{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "InAppMessageCampaigns": + if err := awsRestjson1_deserializeDocumentListOfInAppMessageCampaign(&sv.InAppMessageCampaigns, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInAppTemplateResponse(v **types.InAppTemplateResponse, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InAppTemplateResponse + if *v == nil { + sv = &types.InAppTemplateResponse{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "Content": + if err := awsRestjson1_deserializeDocumentListOfInAppMessageContent(&sv.Content, value); err != nil { return err } - case "Id": + case "CreationDate": if value != nil { jtv, ok := value.(string) if !ok { return fmt.Errorf("expected __string to be of type string, got %T instead", value) } - sv.Id = ptr.String(jtv) + sv.CreationDate = ptr.String(jtv) } - case "JobStatus": + case "CustomConfig": + if err := awsRestjson1_deserializeDocumentMapOf__string(&sv.CustomConfig, value); err != nil { + return err + } + + case "LastModifiedDate": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected JobStatus to be of type string, got %T instead", value) + return fmt.Errorf("expected __string to be of type string, got %T instead", value) } - sv.JobStatus = types.JobStatus(jtv) + sv.LastModifiedDate = ptr.String(jtv) } - case "TotalFailures": + case "Layout": if value != nil { - jtv, ok := value.(json.Number) + jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err + return fmt.Errorf("expected Layout to be of type string, got %T instead", value) } - sv.TotalFailures = int32(i64) + sv.Layout = types.Layout(jtv) } - case "TotalPieces": - if value != nil { - jtv, ok := value.(json.Number) - if !ok { - return 
fmt.Errorf("expected __integer to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err - } - sv.TotalPieces = int32(i64) + case "tags": + if err := awsRestjson1_deserializeDocumentMapOf__string(&sv.Tags, value); err != nil { + return err } - case "TotalProcessed": + case "TemplateDescription": if value != nil { - jtv, ok := value.(json.Number) + jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected __integer to be json.Number, got %T instead", value) - } - i64, err := jtv.Int64() - if err != nil { - return err + return fmt.Errorf("expected __string to be of type string, got %T instead", value) } - sv.TotalProcessed = int32(i64) + sv.TemplateDescription = ptr.String(jtv) } - case "Type": + case "TemplateName": if value != nil { jtv, ok := value.(string) if !ok { return fmt.Errorf("expected __string to be of type string, got %T instead", value) } - sv.Type = ptr.String(jtv) + sv.TemplateName = ptr.String(jtv) } - default: - _, _ = key, value - - } - } - *v = sv - return nil -} - -func awsRestjson1_deserializeDocumentImportJobsResponse(v **types.ImportJobsResponse, value interface{}) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - if value == nil { - return nil - } - - shape, ok := value.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected JSON type %v", value) - } - - var sv *types.ImportJobsResponse - if *v == nil { - sv = &types.ImportJobsResponse{} - } else { - sv = *v - } - - for key, value := range shape { - switch key { - case "Item": - if err := awsRestjson1_deserializeDocumentListOfImportJobResponse(&sv.Item, value); err != nil { - return err + case "TemplateType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TemplateType to be of type string, got %T instead", value) + } + sv.TemplateType = types.TemplateType(jtv) } - case "NextToken": + case "Version": if value != nil { jtv, ok := value.(string) if !ok { return fmt.Errorf("expected __string to be of type string, got %T instead", value) } - sv.NextToken = ptr.String(jtv) + sv.Version = ptr.String(jtv) } default: @@ -25186,6 +26780,74 @@ func awsRestjson1_deserializeDocumentListOfImportJobResponse(v *[]types.ImportJo return nil } +func awsRestjson1_deserializeDocumentListOfInAppMessageCampaign(v *[]types.InAppMessageCampaign, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.InAppMessageCampaign + if *v == nil { + cv = []types.InAppMessageCampaign{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.InAppMessageCampaign + destAddr := &col + if err := awsRestjson1_deserializeDocumentInAppMessageCampaign(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentListOfInAppMessageContent(v *[]types.InAppMessageContent, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.InAppMessageContent + if *v == nil { + cv = []types.InAppMessageContent{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.InAppMessageContent + destAddr 
:= &col + if err := awsRestjson1_deserializeDocumentInAppMessageContent(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsRestjson1_deserializeDocumentListOfJourneyResponse(v *[]types.JourneyResponse, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -26441,6 +28103,11 @@ func awsRestjson1_deserializeDocumentMessageConfiguration(v **types.MessageConfi return err } + case "InAppMessage": + if err := awsRestjson1_deserializeDocumentCampaignInAppMessage(&sv.InAppMessage, value); err != nil { + return err + } + case "SMSMessage": if err := awsRestjson1_deserializeDocumentCampaignSmsMessage(&sv.SMSMessage, value); err != nil { return err @@ -27022,6 +28689,55 @@ func awsRestjson1_deserializeDocumentNumberValidateResponse(v **types.NumberVali return nil } +func awsRestjson1_deserializeDocumentOverrideButtonConfiguration(v **types.OverrideButtonConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.OverrideButtonConfiguration + if *v == nil { + sv = &types.OverrideButtonConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ButtonAction": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ButtonAction to be of type string, got %T instead", value) + } + sv.ButtonAction = types.ButtonAction(jtv) + } + + case "Link": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Link = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentPayloadTooLargeException(v **types.PayloadTooLargeException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -29129,6 +30845,64 @@ func awsRestjson1_deserializeDocumentTemplateConfiguration(v **types.TemplateCon return nil } +func awsRestjson1_deserializeDocumentTemplateCreateMessageBody(v **types.TemplateCreateMessageBody, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TemplateCreateMessageBody + if *v == nil { + sv = &types.TemplateCreateMessageBody{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "Message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + case "RequestID": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected __string to be of type string, got %T instead", value) + } + sv.RequestID = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentTemplateResponse(v **types.TemplateResponse, value interface{}) error { if v == nil 
{ return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/pinpoint/generated.json b/service/pinpoint/generated.json index 63e2a0c3893..af6cd9a40fa 100644 --- a/service/pinpoint/generated.json +++ b/service/pinpoint/generated.json @@ -10,6 +10,7 @@ "api_op_CreateEmailTemplate.go", "api_op_CreateExportJob.go", "api_op_CreateImportJob.go", + "api_op_CreateInAppTemplate.go", "api_op_CreateJourney.go", "api_op_CreatePushTemplate.go", "api_op_CreateRecommenderConfiguration.go", @@ -29,6 +30,7 @@ "api_op_DeleteEndpoint.go", "api_op_DeleteEventStream.go", "api_op_DeleteGcmChannel.go", + "api_op_DeleteInAppTemplate.go", "api_op_DeleteJourney.go", "api_op_DeletePushTemplate.go", "api_op_DeleteRecommenderConfiguration.go", @@ -64,6 +66,8 @@ "api_op_GetGcmChannel.go", "api_op_GetImportJob.go", "api_op_GetImportJobs.go", + "api_op_GetInAppMessages.go", + "api_op_GetInAppTemplate.go", "api_op_GetJourney.go", "api_op_GetJourneyDateRangeKpi.go", "api_op_GetJourneyExecutionActivityMetrics.go", @@ -107,6 +111,7 @@ "api_op_UpdateEndpoint.go", "api_op_UpdateEndpointsBatch.go", "api_op_UpdateGcmChannel.go", + "api_op_UpdateInAppTemplate.go", "api_op_UpdateJourney.go", "api_op_UpdateJourneyState.go", "api_op_UpdatePushTemplate.go", diff --git a/service/pinpoint/serializers.go b/service/pinpoint/serializers.go index ba8e5a0410d..532c40eb34d 100644 --- a/service/pinpoint/serializers.go +++ b/service/pinpoint/serializers.go @@ -368,6 +368,79 @@ func awsRestjson1_serializeOpHttpBindingsCreateImportJobInput(v *CreateImportJob return nil } +type awsRestjson1_serializeOpCreateInAppTemplate struct { +} + +func (*awsRestjson1_serializeOpCreateInAppTemplate) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateInAppTemplate) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateInAppTemplateInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v1/templates/{TemplateName}/inapp") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsCreateInAppTemplateInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.InAppTemplateRequest != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/json") + } + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeDocumentInAppTemplateRequest(input.InAppTemplateRequest, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(jsonEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = 
restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateInAppTemplateInput(v *CreateInAppTemplateInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.TemplateName == nil || len(*v.TemplateName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member TemplateName must not be empty")} + } + if v.TemplateName != nil { + if err := encoder.SetURI("TemplateName").String(*v.TemplateName); err != nil { + return err + } + } + + return nil +} + type awsRestjson1_serializeOpCreateJourney struct { } @@ -1569,6 +1642,68 @@ func awsRestjson1_serializeOpHttpBindingsDeleteGcmChannelInput(v *DeleteGcmChann return nil } +type awsRestjson1_serializeOpDeleteInAppTemplate struct { +} + +func (*awsRestjson1_serializeOpDeleteInAppTemplate) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpDeleteInAppTemplate) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteInAppTemplateInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v1/templates/{TemplateName}/inapp") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsDeleteInAppTemplateInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsDeleteInAppTemplateInput(v *DeleteInAppTemplateInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.TemplateName == nil || len(*v.TemplateName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member TemplateName must not be empty")} + } + if v.TemplateName != nil { + if err := encoder.SetURI("TemplateName").String(*v.TemplateName); err != nil { + return err + } + } + + if v.Version != nil { + encoder.SetQuery("version").String(*v.Version) + } + + return nil +} + type awsRestjson1_serializeOpDeleteJourney struct { } @@ -3812,6 +3947,135 @@ func awsRestjson1_serializeOpHttpBindingsGetImportJobsInput(v *GetImportJobsInpu return nil } +type awsRestjson1_serializeOpGetInAppMessages struct { +} + +func (*awsRestjson1_serializeOpGetInAppMessages) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetInAppMessages) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetInAppMessagesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v1/apps/{ApplicationId}/endpoints/{EndpointId}/inappmessages") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetInAppMessagesInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetInAppMessagesInput(v *GetInAppMessagesInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ApplicationId == nil || len(*v.ApplicationId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member ApplicationId must not be empty")} + } + if v.ApplicationId != nil { + if err := encoder.SetURI("ApplicationId").String(*v.ApplicationId); err != nil { + return err + } + } + + if v.EndpointId == nil || len(*v.EndpointId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member EndpointId must not be empty")} + } + if v.EndpointId != nil { + if err := encoder.SetURI("EndpointId").String(*v.EndpointId); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpGetInAppTemplate struct { +} + +func (*awsRestjson1_serializeOpGetInAppTemplate) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetInAppTemplate) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetInAppTemplateInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v1/templates/{TemplateName}/inapp") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetInAppTemplateInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: 
err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetInAppTemplateInput(v *GetInAppTemplateInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.TemplateName == nil || len(*v.TemplateName) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member TemplateName must not be empty")} + } + if v.TemplateName != nil { + if err := encoder.SetURI("TemplateName").String(*v.TemplateName); err != nil { + return err + } + } + + if v.Version != nil { + encoder.SetQuery("version").String(*v.Version) + } + + return nil +} + type awsRestjson1_serializeOpGetJourney struct { } @@ -6858,6 +7122,87 @@ func awsRestjson1_serializeOpHttpBindingsUpdateGcmChannelInput(v *UpdateGcmChann return nil } +type awsRestjson1_serializeOpUpdateInAppTemplate struct { +} + +func (*awsRestjson1_serializeOpUpdateInAppTemplate) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpUpdateInAppTemplate) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateInAppTemplateInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/v1/templates/{TemplateName}/inapp") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsUpdateInAppTemplateInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.InAppTemplateRequest != nil { + if !restEncoder.HasHeader("Content-Type") { + restEncoder.SetHeader("Content-Type").String("application/json") + } + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeDocumentInAppTemplateRequest(input.InAppTemplateRequest, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(jsonEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsUpdateInAppTemplateInput(v *UpdateInAppTemplateInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.CreateNewVersion { + encoder.SetQuery("create-new-version").Boolean(v.CreateNewVersion) + } + + if v.TemplateName == nil || len(*v.TemplateName) == 0 { + 
return &smithy.SerializationError{Err: fmt.Errorf("input member TemplateName must not be empty")} + } + if v.TemplateName != nil { + if err := encoder.SetURI("TemplateName").String(*v.TemplateName); err != nil { + return err + } + } + + if v.Version != nil { + encoder.SetQuery("version").String(*v.Version) + } + + return nil +} + type awsRestjson1_serializeOpUpdateJourney struct { } @@ -8462,6 +8807,37 @@ func awsRestjson1_serializeDocumentCampaignHook(v *types.CampaignHook, value smi return nil } +func awsRestjson1_serializeDocumentCampaignInAppMessage(v *types.CampaignInAppMessage, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Body != nil { + ok := object.Key("Body") + ok.String(*v.Body) + } + + if v.Content != nil { + ok := object.Key("Content") + if err := awsRestjson1_serializeDocumentListOfInAppMessageContent(v.Content, ok); err != nil { + return err + } + } + + if v.CustomConfig != nil { + ok := object.Key("CustomConfig") + if err := awsRestjson1_serializeDocumentMapOf__string(v.CustomConfig, ok); err != nil { + return err + } + } + + if len(v.Layout) > 0 { + ok := object.Key("Layout") + ok.String(string(v.Layout)) + } + + return nil +} + func awsRestjson1_serializeDocumentCampaignLimits(v *types.CampaignLimits, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -8481,6 +8857,11 @@ func awsRestjson1_serializeDocumentCampaignLimits(v *types.CampaignLimits, value ok.Integer(v.MessagesPerSecond) } + if v.Session != 0 { + ok := object.Key("Session") + ok.Integer(v.Session) + } + if v.Total != 0 { ok := object.Key("Total") ok.Integer(v.Total) @@ -8709,6 +9090,43 @@ func awsRestjson1_serializeDocumentCustomMessageActivity(v *types.CustomMessageA return nil } +func awsRestjson1_serializeDocumentDefaultButtonConfiguration(v *types.DefaultButtonConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BackgroundColor != nil { + ok := object.Key("BackgroundColor") + ok.String(*v.BackgroundColor) + } + + if v.BorderRadius != 0 { + ok := object.Key("BorderRadius") + ok.Integer(v.BorderRadius) + } + + if len(v.ButtonAction) > 0 { + ok := object.Key("ButtonAction") + ok.String(string(v.ButtonAction)) + } + + if v.Link != nil { + ok := object.Key("Link") + ok.String(*v.Link) + } + + if v.Text != nil { + ok := object.Key("Text") + ok.String(*v.Text) + } + + if v.TextColor != nil { + ok := object.Key("TextColor") + ok.String(*v.TextColor) + } + + return nil +} + func awsRestjson1_serializeDocumentDefaultMessage(v *types.DefaultMessage, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -9821,6 +10239,168 @@ func awsRestjson1_serializeDocumentImportJobRequest(v *types.ImportJobRequest, v return nil } +func awsRestjson1_serializeDocumentInAppMessageBodyConfig(v *types.InAppMessageBodyConfig, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Alignment) > 0 { + ok := object.Key("Alignment") + ok.String(string(v.Alignment)) + } + + if v.Body != nil { + ok := object.Key("Body") + ok.String(*v.Body) + } + + if v.TextColor != nil { + ok := object.Key("TextColor") + ok.String(*v.TextColor) + } + + return nil +} + +func awsRestjson1_serializeDocumentInAppMessageButton(v *types.InAppMessageButton, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Android != nil { + ok := object.Key("Android") + if err := awsRestjson1_serializeDocumentOverrideButtonConfiguration(v.Android, 
ok); err != nil { + return err + } + } + + if v.DefaultConfig != nil { + ok := object.Key("DefaultConfig") + if err := awsRestjson1_serializeDocumentDefaultButtonConfiguration(v.DefaultConfig, ok); err != nil { + return err + } + } + + if v.IOS != nil { + ok := object.Key("IOS") + if err := awsRestjson1_serializeDocumentOverrideButtonConfiguration(v.IOS, ok); err != nil { + return err + } + } + + if v.Web != nil { + ok := object.Key("Web") + if err := awsRestjson1_serializeDocumentOverrideButtonConfiguration(v.Web, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentInAppMessageContent(v *types.InAppMessageContent, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BackgroundColor != nil { + ok := object.Key("BackgroundColor") + ok.String(*v.BackgroundColor) + } + + if v.BodyConfig != nil { + ok := object.Key("BodyConfig") + if err := awsRestjson1_serializeDocumentInAppMessageBodyConfig(v.BodyConfig, ok); err != nil { + return err + } + } + + if v.HeaderConfig != nil { + ok := object.Key("HeaderConfig") + if err := awsRestjson1_serializeDocumentInAppMessageHeaderConfig(v.HeaderConfig, ok); err != nil { + return err + } + } + + if v.ImageUrl != nil { + ok := object.Key("ImageUrl") + ok.String(*v.ImageUrl) + } + + if v.PrimaryBtn != nil { + ok := object.Key("PrimaryBtn") + if err := awsRestjson1_serializeDocumentInAppMessageButton(v.PrimaryBtn, ok); err != nil { + return err + } + } + + if v.SecondaryBtn != nil { + ok := object.Key("SecondaryBtn") + if err := awsRestjson1_serializeDocumentInAppMessageButton(v.SecondaryBtn, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentInAppMessageHeaderConfig(v *types.InAppMessageHeaderConfig, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Alignment) > 0 { + ok := object.Key("Alignment") + ok.String(string(v.Alignment)) + } + + if v.Header != nil { + ok := object.Key("Header") + ok.String(*v.Header) + } + + if v.TextColor != nil { + ok := object.Key("TextColor") + ok.String(*v.TextColor) + } + + return nil +} + +func awsRestjson1_serializeDocumentInAppTemplateRequest(v *types.InAppTemplateRequest, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Content != nil { + ok := object.Key("Content") + if err := awsRestjson1_serializeDocumentListOfInAppMessageContent(v.Content, ok); err != nil { + return err + } + } + + if v.CustomConfig != nil { + ok := object.Key("CustomConfig") + if err := awsRestjson1_serializeDocumentMapOf__string(v.CustomConfig, ok); err != nil { + return err + } + } + + if len(v.Layout) > 0 { + ok := object.Key("Layout") + ok.String(string(v.Layout)) + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsRestjson1_serializeDocumentMapOf__string(v.Tags, ok); err != nil { + return err + } + } + + if v.TemplateDescription != nil { + ok := object.Key("TemplateDescription") + ok.String(*v.TemplateDescription) + } + + return nil +} + func awsRestjson1_serializeDocumentJourneyCustomMessage(v *types.JourneyCustomMessage, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -9985,6 +10565,19 @@ func awsRestjson1_serializeDocumentListOfEndpointBatchItem(v []types.EndpointBat return nil } +func awsRestjson1_serializeDocumentListOfInAppMessageContent(v []types.InAppMessageContent, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := 
array.Value() + if err := awsRestjson1_serializeDocumentInAppMessageContent(&v[i], av); err != nil { + return err + } + } + return nil +} + func awsRestjson1_serializeDocumentListOfMultiConditionalBranch(v []types.MultiConditionalBranch, value smithyjson.Value) error { array := value.Array() defer array.Close() @@ -10345,6 +10938,13 @@ func awsRestjson1_serializeDocumentMessageConfiguration(v *types.MessageConfigur } } + if v.InAppMessage != nil { + ok := object.Key("InAppMessage") + if err := awsRestjson1_serializeDocumentCampaignInAppMessage(v.InAppMessage, ok); err != nil { + return err + } + } + if v.SMSMessage != nil { ok := object.Key("SMSMessage") if err := awsRestjson1_serializeDocumentCampaignSmsMessage(v.SMSMessage, ok); err != nil { @@ -10494,6 +11094,23 @@ func awsRestjson1_serializeDocumentNumberValidateRequest(v *types.NumberValidate return nil } +func awsRestjson1_serializeDocumentOverrideButtonConfiguration(v *types.OverrideButtonConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.ButtonAction) > 0 { + ok := object.Key("ButtonAction") + ok.String(string(v.ButtonAction)) + } + + if v.Link != nil { + ok := object.Key("Link") + ok.String(*v.Link) + } + + return nil +} + func awsRestjson1_serializeDocumentPublicEndpoint(v *types.PublicEndpoint, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -11682,6 +12299,11 @@ func awsRestjson1_serializeDocumentWriteCampaignRequest(v *types.WriteCampaignRe ok.String(*v.Name) } + if v.Priority != 0 { + ok := object.Key("Priority") + ok.Integer(v.Priority) + } + if v.Schedule != nil { ok := object.Key("Schedule") if err := awsRestjson1_serializeDocumentSchedule(v.Schedule, ok); err != nil { diff --git a/service/pinpoint/types/enums.go b/service/pinpoint/types/enums.go index b7fe877562b..822da481e1b 100644 --- a/service/pinpoint/types/enums.go +++ b/service/pinpoint/types/enums.go @@ -18,6 +18,7 @@ const ( EndpointTypesElementEmail EndpointTypesElement = "EMAIL" EndpointTypesElementBaidu EndpointTypesElement = "BAIDU" EndpointTypesElementCustom EndpointTypesElement = "CUSTOM" + EndpointTypesElementInApp EndpointTypesElement = "IN_APP" ) // Values returns all known values for EndpointTypesElement. Note that this can be @@ -37,6 +38,7 @@ func (EndpointTypesElement) Values() []EndpointTypesElement { "EMAIL", "BAIDU", "CUSTOM", + "IN_APP", } } @@ -60,6 +62,26 @@ func (Action) Values() []Action { } } +type Alignment string + +// Enum values for Alignment +const ( + AlignmentLeft Alignment = "LEFT" + AlignmentCenter Alignment = "CENTER" + AlignmentRight Alignment = "RIGHT" +) + +// Values returns all known values for Alignment. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (Alignment) Values() []Alignment { + return []Alignment{ + "LEFT", + "CENTER", + "RIGHT", + } +} + type AttributeType string // Enum values for AttributeType @@ -88,6 +110,26 @@ func (AttributeType) Values() []AttributeType { } } +type ButtonAction string + +// Enum values for ButtonAction +const ( + ButtonActionLink ButtonAction = "LINK" + ButtonActionDeepLink ButtonAction = "DEEP_LINK" + ButtonActionClose ButtonAction = "CLOSE" +) + +// Values returns all known values for ButtonAction. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. 
+func (ButtonAction) Values() []ButtonAction { + return []ButtonAction{ + "LINK", + "DEEP_LINK", + "CLOSE", + } +} + type CampaignStatus string // Enum values for CampaignStatus @@ -132,6 +174,7 @@ const ( ChannelTypeEmail ChannelType = "EMAIL" ChannelTypeBaidu ChannelType = "BAIDU" ChannelTypeCustom ChannelType = "CUSTOM" + ChannelTypeInApp ChannelType = "IN_APP" ) // Values returns all known values for ChannelType. Note that this can be expanded @@ -151,6 +194,7 @@ func (ChannelType) Values() []ChannelType { "EMAIL", "BAIDU", "CUSTOM", + "IN_APP", } } @@ -262,12 +306,13 @@ type Frequency string // Enum values for Frequency const ( - FrequencyOnce Frequency = "ONCE" - FrequencyHourly Frequency = "HOURLY" - FrequencyDaily Frequency = "DAILY" - FrequencyWeekly Frequency = "WEEKLY" - FrequencyMonthly Frequency = "MONTHLY" - FrequencyEvent Frequency = "EVENT" + FrequencyOnce Frequency = "ONCE" + FrequencyHourly Frequency = "HOURLY" + FrequencyDaily Frequency = "DAILY" + FrequencyWeekly Frequency = "WEEKLY" + FrequencyMonthly Frequency = "MONTHLY" + FrequencyEvent Frequency = "EVENT" + FrequencyInAppEvent Frequency = "IN_APP_EVENT" ) // Values returns all known values for Frequency. Note that this can be expanded in @@ -281,6 +326,7 @@ func (Frequency) Values() []Frequency { "WEEKLY", "MONTHLY", "EVENT", + "IN_APP_EVENT", } } @@ -336,6 +382,32 @@ func (JobStatus) Values() []JobStatus { } } +type Layout string + +// Enum values for Layout +const ( + LayoutBottomBanner Layout = "BOTTOM_BANNER" + LayoutTopBanner Layout = "TOP_BANNER" + LayoutOverlays Layout = "OVERLAYS" + LayoutMobileFeed Layout = "MOBILE_FEED" + LayoutMiddleBanner Layout = "MIDDLE_BANNER" + LayoutCarousel Layout = "CAROUSEL" +) + +// Values returns all known values for Layout. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (Layout) Values() []Layout { + return []Layout{ + "BOTTOM_BANNER", + "TOP_BANNER", + "OVERLAYS", + "MOBILE_FEED", + "MIDDLE_BANNER", + "CAROUSEL", + } +} + type MessageType string // Enum values for MessageType @@ -480,6 +552,7 @@ const ( TemplateTypeSms TemplateType = "SMS" TemplateTypeVoice TemplateType = "VOICE" TemplateTypePush TemplateType = "PUSH" + TemplateTypeInapp TemplateType = "INAPP" ) // Values returns all known values for TemplateType. Note that this can be expanded @@ -491,6 +564,7 @@ func (TemplateType) Values() []TemplateType { "SMS", "VOICE", "PUSH", + "INAPP", } } diff --git a/service/pinpoint/types/types.go b/service/pinpoint/types/types.go index 027f4f0c11c..2ab93c0ab98 100644 --- a/service/pinpoint/types/types.go +++ b/service/pinpoint/types/types.go @@ -1429,6 +1429,24 @@ type CampaignHook struct { noSmithyDocumentSerde } +// In-app message configuration. +type CampaignInAppMessage struct { + + // The message body of the notification, the email body or the text message. + Body *string + + // In-app message content. + Content []InAppMessageContent + + // Custom config to be sent to client. + CustomConfig map[string]string + + // In-app message layout. + Layout Layout + + noSmithyDocumentSerde +} + // For a campaign, specifies limits on the messages that the campaign can send. For // an application, specifies the default limits for messages that campaigns in the // application can send. @@ -1451,6 +1469,10 @@ type CampaignLimits struct { // is 20,000. 
MessagesPerSecond int32 + // The maximum total number of messages that the campaign can send per user + // session. + Session int32 + // The maximum number of messages that a campaign can send to a single endpoint // during the course of the campaign. If a campaign recurs, this setting applies to // all runs of the campaign. The maximum value is 100. @@ -1534,6 +1556,11 @@ type CampaignResponse struct { // The name of the campaign. Name *string + // Defines the priority of the campaign, used to decide the order of messages + // displayed to user if there are multiple messages scheduled to be displayed at + // the same moment. + Priority int32 + // The schedule settings for the campaign. Schedule *Schedule @@ -1920,6 +1947,34 @@ type CustomMessageActivity struct { noSmithyDocumentSerde } +// Default button configuration. +type DefaultButtonConfiguration struct { + + // Action triggered by the button. + // + // This member is required. + ButtonAction ButtonAction + + // Button text. + // + // This member is required. + Text *string + + // The background color of the button. + BackgroundColor *string + + // The border radius of the button. + BorderRadius int32 + + // Button destination. + Link *string + + // The text color of the button. + TextColor *string + + noSmithyDocumentSerde +} + // Specifies the default message for all channels. type DefaultMessage struct { @@ -3535,6 +3590,238 @@ type ImportJobsResponse struct { noSmithyDocumentSerde } +// Schedule of the campaign. +type InAppCampaignSchedule struct { + + // The scheduled time after which the in-app message should not be shown. Timestamp + // is in ISO 8601 format. + EndDate *string + + // The event filter the SDK has to use to show the in-app message in the + // application. + EventFilter *CampaignEventFilter + + // Time during which the in-app message should not be shown to the user. + QuietTime *QuietTime + + noSmithyDocumentSerde +} + +// Provides all fields required for building an in-app message. +type InAppMessage struct { + + // In-app message content. + Content []InAppMessageContent + + // Custom config to be sent to SDK. + CustomConfig map[string]string + + // The layout of the message. + Layout Layout + + noSmithyDocumentSerde +} + +// Text config for Message Body. +type InAppMessageBodyConfig struct { + + // The alignment of the text. Valid values: LEFT, CENTER, RIGHT. + // + // This member is required. + Alignment Alignment + + // Message Body. + // + // This member is required. + Body *string + + // The text color. + // + // This member is required. + TextColor *string + + noSmithyDocumentSerde +} + +// Button Config for an in-app message. +type InAppMessageButton struct { + + // Default button content. + Android *OverrideButtonConfiguration + + // Default button content. + DefaultConfig *DefaultButtonConfiguration + + // Default button content. + IOS *OverrideButtonConfiguration + + // Default button content. + Web *OverrideButtonConfiguration + + noSmithyDocumentSerde +} + +// Targeted in-app message campaign. +type InAppMessageCampaign struct { + + // Campaign id of the corresponding campaign. + CampaignId *string + + // Daily cap which controls the number of times any in-app messages can be shown to + // the endpoint during a day. + DailyCap int32 + + // In-app message content with all fields required for rendering an in-app message. + InAppMessage *InAppMessage + + // Priority of the in-app message. + Priority int32 + + // Schedule of the campaign. 
+ Schedule *InAppCampaignSchedule + + // Session cap which controls the number of times an in-app message can be shown to + // the endpoint during an application session. + SessionCap int32 + + // Total cap which controls the number of times an in-app message can be shown to + // the endpoint. + TotalCap int32 + + // Treatment id of the campaign. + TreatmentId *string + + noSmithyDocumentSerde +} + +// The configuration for the message content. +type InAppMessageContent struct { + + // The background color for the message. + BackgroundColor *string + + // The configuration for the message body. + BodyConfig *InAppMessageBodyConfig + + // The configuration for the message header. + HeaderConfig *InAppMessageHeaderConfig + + // The image url for the background of message. + ImageUrl *string + + // The first button inside the message. + PrimaryBtn *InAppMessageButton + + // The second button inside message. + SecondaryBtn *InAppMessageButton + + noSmithyDocumentSerde +} + +// Text config for Message Header. +type InAppMessageHeaderConfig struct { + + // The alignment of the text. Valid values: LEFT, CENTER, RIGHT. + // + // This member is required. + Alignment Alignment + + // Message Header. + // + // This member is required. + Header *string + + // The text color. + // + // This member is required. + TextColor *string + + noSmithyDocumentSerde +} + +// Get in-app messages response object. +type InAppMessagesResponse struct { + + // List of targeted in-app message campaigns. + InAppMessageCampaigns []InAppMessageCampaign + + noSmithyDocumentSerde +} + +// InApp Template Request. +type InAppTemplateRequest struct { + + // The content of the message, can include up to 5 modals. Each modal must contain + // a message, a header, and background color. ImageUrl and buttons are optional. + Content []InAppMessageContent + + // Custom config to be sent to client. + CustomConfig map[string]string + + // The layout of the message. + Layout Layout + + // A string-to-string map of key-value pairs that defines the tags to associate + // with the message template. Each tag consists of a required tag key and an + // associated tag value. + Tags map[string]string + + // The description of the template. + TemplateDescription *string + + noSmithyDocumentSerde +} + +// In-App Template Response. +type InAppTemplateResponse struct { + + // The creation date of the template. + // + // This member is required. + CreationDate *string + + // The last modified date of the template. + // + // This member is required. + LastModifiedDate *string + + // The name of the template. + // + // This member is required. + TemplateName *string + + // The type of the template. + // + // This member is required. + TemplateType TemplateType + + // The resource arn of the template. + Arn *string + + // The content of the message, can include up to 5 modals. Each modal must contain + // a message, a header, and background color. ImageUrl and buttons are optional. + Content []InAppMessageContent + + // Custom config to be sent to client. + CustomConfig map[string]string + + // The layout of the message. + Layout Layout + + // A string-to-string map of key-value pairs that defines the tags to associate + // with the message template. Each tag consists of a required tag key and an + // associated tag value. + Tags map[string]string + + // The description of the template. + TemplateDescription *string + + // The version id of the template. 
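The InAppTemplateRequest and InAppTemplateResponse shapes above back the new in-app template operations. A minimal sketch of creating a template, assuming the usual generated client method name (CreateInAppTemplate); the template name, text, and colors are hypothetical, while the field and enum names come from the types and enums added in this change:

// Sketch only; not part of the generated change.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/pinpoint"
	"github.com/aws/aws-sdk-go-v2/service/pinpoint/types"
)

func createInAppTemplate(ctx context.Context, client *pinpoint.Client) error {
	// Content allows up to 5 modals; each needs a header, a body, and a
	// background color, while ImageUrl and buttons stay optional.
	_, err := client.CreateInAppTemplate(ctx, &pinpoint.CreateInAppTemplateInput{
		TemplateName: aws.String("spring-sale-banner"), // hypothetical name
		InAppTemplateRequest: &types.InAppTemplateRequest{
			Layout: types.LayoutTopBanner,
			Content: []types.InAppMessageContent{{
				BackgroundColor: aws.String("#FFFFFF"),
				HeaderConfig: &types.InAppMessageHeaderConfig{
					Alignment: types.AlignmentCenter,
					Header:    aws.String("Spring sale"),
					TextColor: aws.String("#000000"),
				},
				BodyConfig: &types.InAppMessageBodyConfig{
					Alignment: types.AlignmentLeft,
					Body:      aws.String("20% off this week."),
					TextColor: aws.String("#000000"),
				},
				PrimaryBtn: &types.InAppMessageButton{
					DefaultConfig: &types.DefaultButtonConfiguration{
						ButtonAction: types.ButtonActionClose,
						Text:         aws.String("Dismiss"),
					},
				},
			}},
			TemplateDescription: aws.String("Banner for the spring campaign"),
		},
	})
	return err
}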
+ Version *string + + noSmithyDocumentSerde +} + // Provides information about the results of a request to create or update an // endpoint that's associated with an event. type ItemResponse struct { @@ -4091,6 +4378,9 @@ type MessageConfiguration struct { // message overrides the default message. GCMMessage *Message + // The in-app message configuration. + InAppMessage *CampaignInAppMessage + // The message that the campaign sends through the SMS channel. If specified, this // message overrides the default message. SMSMessage *CampaignSmsMessage @@ -4352,6 +4642,20 @@ type NumberValidateResponse struct { noSmithyDocumentSerde } +// Override button configuration. +type OverrideButtonConfiguration struct { + + // Action triggered by the button. + // + // This member is required. + ButtonAction ButtonAction + + // Button destination. + Link *string + + noSmithyDocumentSerde +} + // Specifies the properties and attributes of an endpoint that's associated with an // event. type PublicEndpoint struct { @@ -5578,6 +5882,22 @@ type TemplateConfiguration struct { noSmithyDocumentSerde } +// Provides information about a request to create a message template. +type TemplateCreateMessageBody struct { + + // The Amazon Resource Name (ARN) of the message template that was created. + Arn *string + + // The message that's returned from the API for the request to create the message + // template. + Message *string + + // The unique identifier for the request to create the message template. + RequestID *string + + noSmithyDocumentSerde +} + // Provides information about a message template that's associated with your Amazon // Pinpoint account. type TemplateResponse struct { @@ -6176,6 +6496,11 @@ type WriteCampaignRequest struct { // A custom name for the campaign. Name *string + // Defines the priority of the campaign, used to decide the order of messages + // displayed to user if there are multiple messages scheduled to be displayed at + // the same moment. + Priority int32 + // The schedule settings for the campaign. 
Schedule *Schedule diff --git a/service/pinpoint/validators.go b/service/pinpoint/validators.go index 4bb78431682..ddebc5b8e8b 100644 --- a/service/pinpoint/validators.go +++ b/service/pinpoint/validators.go @@ -110,6 +110,26 @@ func (m *validateOpCreateImportJob) HandleInitialize(ctx context.Context, in mid return next.HandleInitialize(ctx, in) } +type validateOpCreateInAppTemplate struct { +} + +func (*validateOpCreateInAppTemplate) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateInAppTemplate) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateInAppTemplateInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateInAppTemplateInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpCreateJourney struct { } @@ -490,6 +510,26 @@ func (m *validateOpDeleteGcmChannel) HandleInitialize(ctx context.Context, in mi return next.HandleInitialize(ctx, in) } +type validateOpDeleteInAppTemplate struct { +} + +func (*validateOpDeleteInAppTemplate) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteInAppTemplate) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteInAppTemplateInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteInAppTemplateInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDeleteJourney struct { } @@ -1170,6 +1210,46 @@ func (m *validateOpGetImportJobs) HandleInitialize(ctx context.Context, in middl return next.HandleInitialize(ctx, in) } +type validateOpGetInAppMessages struct { +} + +func (*validateOpGetInAppMessages) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetInAppMessages) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetInAppMessagesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetInAppMessagesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetInAppTemplate struct { +} + +func (*validateOpGetInAppTemplate) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetInAppTemplate) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetInAppTemplateInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetInAppTemplateInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpGetJourneyDateRangeKpi struct { } @@ -1990,6 +2070,26 @@ func (m *validateOpUpdateGcmChannel) HandleInitialize(ctx context.Context, in mi return 
next.HandleInitialize(ctx, in) } +type validateOpUpdateInAppTemplate struct { +} + +func (*validateOpUpdateInAppTemplate) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateInAppTemplate) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateInAppTemplateInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateInAppTemplateInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpUpdateJourney struct { } @@ -2210,6 +2310,10 @@ func addOpCreateImportJobValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateImportJob{}, middleware.After) } +func addOpCreateInAppTemplateValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateInAppTemplate{}, middleware.After) +} + func addOpCreateJourneyValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateJourney{}, middleware.After) } @@ -2286,6 +2390,10 @@ func addOpDeleteGcmChannelValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteGcmChannel{}, middleware.After) } +func addOpDeleteInAppTemplateValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteInAppTemplate{}, middleware.After) +} + func addOpDeleteJourneyValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteJourney{}, middleware.After) } @@ -2422,6 +2530,14 @@ func addOpGetImportJobsValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetImportJobs{}, middleware.After) } +func addOpGetInAppMessagesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetInAppMessages{}, middleware.After) +} + +func addOpGetInAppTemplateValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetInAppTemplate{}, middleware.After) +} + func addOpGetJourneyDateRangeKpiValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetJourneyDateRangeKpi{}, middleware.After) } @@ -2586,6 +2702,10 @@ func addOpUpdateGcmChannelValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpUpdateGcmChannel{}, middleware.After) } +func addOpUpdateInAppTemplateValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateInAppTemplate{}, middleware.After) +} + func addOpUpdateJourneyValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpUpdateJourney{}, middleware.After) } @@ -2726,6 +2846,23 @@ func validateCampaignEventFilter(v *types.CampaignEventFilter) error { } } +func validateCampaignInAppMessage(v *types.CampaignInAppMessage) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CampaignInAppMessage"} + if v.Content != nil { + if err := validateListOfInAppMessageContent(v.Content); err != nil { + invalidParams.AddNested("Content", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateCondition(v *types.Condition) error { if v == nil { 
return nil @@ -2808,6 +2945,24 @@ func validateCustomDeliveryConfiguration(v *types.CustomDeliveryConfiguration) e } } +func validateDefaultButtonConfiguration(v *types.DefaultButtonConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DefaultButtonConfiguration"} + if len(v.ButtonAction) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ButtonAction")) + } + if v.Text == nil { + invalidParams.Add(smithy.NewErrParamRequired("Text")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateEmailChannelRequest(v *types.EmailChannelRequest) error { if v == nil { return nil @@ -3085,6 +3240,146 @@ func validateImportJobRequest(v *types.ImportJobRequest) error { } } +func validateInAppMessageBodyConfig(v *types.InAppMessageBodyConfig) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InAppMessageBodyConfig"} + if len(v.Alignment) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Alignment")) + } + if v.Body == nil { + invalidParams.Add(smithy.NewErrParamRequired("Body")) + } + if v.TextColor == nil { + invalidParams.Add(smithy.NewErrParamRequired("TextColor")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInAppMessageButton(v *types.InAppMessageButton) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InAppMessageButton"} + if v.Android != nil { + if err := validateOverrideButtonConfiguration(v.Android); err != nil { + invalidParams.AddNested("Android", err.(smithy.InvalidParamsError)) + } + } + if v.DefaultConfig != nil { + if err := validateDefaultButtonConfiguration(v.DefaultConfig); err != nil { + invalidParams.AddNested("DefaultConfig", err.(smithy.InvalidParamsError)) + } + } + if v.IOS != nil { + if err := validateOverrideButtonConfiguration(v.IOS); err != nil { + invalidParams.AddNested("IOS", err.(smithy.InvalidParamsError)) + } + } + if v.Web != nil { + if err := validateOverrideButtonConfiguration(v.Web); err != nil { + invalidParams.AddNested("Web", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInAppMessageContent(v *types.InAppMessageContent) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InAppMessageContent"} + if v.BodyConfig != nil { + if err := validateInAppMessageBodyConfig(v.BodyConfig); err != nil { + invalidParams.AddNested("BodyConfig", err.(smithy.InvalidParamsError)) + } + } + if v.HeaderConfig != nil { + if err := validateInAppMessageHeaderConfig(v.HeaderConfig); err != nil { + invalidParams.AddNested("HeaderConfig", err.(smithy.InvalidParamsError)) + } + } + if v.PrimaryBtn != nil { + if err := validateInAppMessageButton(v.PrimaryBtn); err != nil { + invalidParams.AddNested("PrimaryBtn", err.(smithy.InvalidParamsError)) + } + } + if v.SecondaryBtn != nil { + if err := validateInAppMessageButton(v.SecondaryBtn); err != nil { + invalidParams.AddNested("SecondaryBtn", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInAppMessageHeaderConfig(v *types.InAppMessageHeaderConfig) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InAppMessageHeaderConfig"} + if len(v.Alignment) == 0 { + 
invalidParams.Add(smithy.NewErrParamRequired("Alignment")) + } + if v.Header == nil { + invalidParams.Add(smithy.NewErrParamRequired("Header")) + } + if v.TextColor == nil { + invalidParams.Add(smithy.NewErrParamRequired("TextColor")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInAppTemplateRequest(v *types.InAppTemplateRequest) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InAppTemplateRequest"} + if v.Content != nil { + if err := validateListOfInAppMessageContent(v.Content); err != nil { + invalidParams.AddNested("Content", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateListOfInAppMessageContent(v []types.InAppMessageContent) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListOfInAppMessageContent"} + for i := range v { + if err := validateInAppMessageContent(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateListOfMultiConditionalBranch(v []types.MultiConditionalBranch) error { if v == nil { return nil @@ -3277,6 +3572,23 @@ func validateMapOfMetricDimension(v map[string]types.MetricDimension) error { } } +func validateMessageConfiguration(v *types.MessageConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "MessageConfiguration"} + if v.InAppMessage != nil { + if err := validateCampaignInAppMessage(v.InAppMessage); err != nil { + invalidParams.AddNested("InAppMessage", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateMessageRequest(v *types.MessageRequest) error { if v == nil { return nil @@ -3341,6 +3653,21 @@ func validateMultiConditionalSplitActivity(v *types.MultiConditionalSplitActivit } } +func validateOverrideButtonConfiguration(v *types.OverrideButtonConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "OverrideButtonConfiguration"} + if len(v.ButtonAction) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("ButtonAction")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateRecencyDimension(v *types.RecencyDimension) error { if v == nil { return nil @@ -3719,6 +4046,11 @@ func validateWriteCampaignRequest(v *types.WriteCampaignRequest) error { invalidParams.AddNested("CustomDeliveryConfiguration", err.(smithy.InvalidParamsError)) } } + if v.MessageConfiguration != nil { + if err := validateMessageConfiguration(v.MessageConfiguration); err != nil { + invalidParams.AddNested("MessageConfiguration", err.(smithy.InvalidParamsError)) + } + } if v.Schedule != nil { if err := validateSchedule(v.Schedule); err != nil { invalidParams.AddNested("Schedule", err.(smithy.InvalidParamsError)) @@ -3806,6 +4138,11 @@ func validateWriteTreatmentResource(v *types.WriteTreatmentResource) error { invalidParams.AddNested("CustomDeliveryConfiguration", err.(smithy.InvalidParamsError)) } } + if v.MessageConfiguration != nil { + if err := validateMessageConfiguration(v.MessageConfiguration); err != nil { + invalidParams.AddNested("MessageConfiguration", err.(smithy.InvalidParamsError)) + } + } if v.Schedule != nil { if err := 
validateSchedule(v.Schedule); err != nil { invalidParams.AddNested("Schedule", err.(smithy.InvalidParamsError)) @@ -3921,6 +4258,28 @@ func validateOpCreateImportJobInput(v *CreateImportJobInput) error { } } +func validateOpCreateInAppTemplateInput(v *CreateInAppTemplateInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateInAppTemplateInput"} + if v.InAppTemplateRequest == nil { + invalidParams.Add(smithy.NewErrParamRequired("InAppTemplateRequest")) + } else if v.InAppTemplateRequest != nil { + if err := validateInAppTemplateRequest(v.InAppTemplateRequest); err != nil { + invalidParams.AddNested("InAppTemplateRequest", err.(smithy.InvalidParamsError)) + } + } + if v.TemplateName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TemplateName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpCreateJourneyInput(v *CreateJourneyInput) error { if v == nil { return nil @@ -4239,6 +4598,21 @@ func validateOpDeleteGcmChannelInput(v *DeleteGcmChannelInput) error { } } +func validateOpDeleteInAppTemplateInput(v *DeleteInAppTemplateInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteInAppTemplateInput"} + if v.TemplateName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TemplateName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDeleteJourneyInput(v *DeleteJourneyInput) error { if v == nil { return nil @@ -4791,6 +5165,39 @@ func validateOpGetImportJobsInput(v *GetImportJobsInput) error { } } +func validateOpGetInAppMessagesInput(v *GetInAppMessagesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetInAppMessagesInput"} + if v.ApplicationId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ApplicationId")) + } + if v.EndpointId == nil { + invalidParams.Add(smithy.NewErrParamRequired("EndpointId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetInAppTemplateInput(v *GetInAppTemplateInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetInAppTemplateInput"} + if v.TemplateName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TemplateName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpGetJourneyDateRangeKpiInput(v *GetJourneyDateRangeKpiInput) error { if v == nil { return nil @@ -5561,6 +5968,28 @@ func validateOpUpdateGcmChannelInput(v *UpdateGcmChannelInput) error { } } +func validateOpUpdateInAppTemplateInput(v *UpdateInAppTemplateInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateInAppTemplateInput"} + if v.InAppTemplateRequest == nil { + invalidParams.Add(smithy.NewErrParamRequired("InAppTemplateRequest")) + } else if v.InAppTemplateRequest != nil { + if err := validateInAppTemplateRequest(v.InAppTemplateRequest); err != nil { + invalidParams.AddNested("InAppTemplateRequest", err.(smithy.InvalidParamsError)) + } + } + if v.TemplateName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TemplateName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpUpdateJourneyInput(v *UpdateJourneyInput) error { if v == nil { return nil diff --git a/service/quicksight/deserializers.go 
b/service/quicksight/deserializers.go index 4098187c2d9..264520ae7fe 100644 --- a/service/quicksight/deserializers.go +++ b/service/quicksight/deserializers.go @@ -23327,6 +23327,46 @@ func awsRestjson1_deserializeDocumentAmazonElasticsearchParameters(v **types.Ama return nil } +func awsRestjson1_deserializeDocumentAmazonOpenSearchParameters(v **types.AmazonOpenSearchParameters, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AmazonOpenSearchParameters + if *v == nil { + sv = &types.AmazonOpenSearchParameters{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Domain": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Domain to be of type string, got %T instead", value) + } + sv.Domain = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentAnalysis(v **types.Analysis, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -26321,6 +26361,16 @@ loop: uv = &types.DataSourceParametersMemberAmazonElasticsearchParameters{Value: mv} break loop + case "AmazonOpenSearchParameters": + var mv types.AmazonOpenSearchParameters + destAddr := &mv + if err := awsRestjson1_deserializeDocumentAmazonOpenSearchParameters(&destAddr, value); err != nil { + return err + } + mv = *destAddr + uv = &types.DataSourceParametersMemberAmazonOpenSearchParameters{Value: mv} + break loop + case "AthenaParameters": var mv types.AthenaParameters destAddr := &mv diff --git a/service/quicksight/serializers.go b/service/quicksight/serializers.go index f5e7f4cb333..19c22af9d6c 100644 --- a/service/quicksight/serializers.go +++ b/service/quicksight/serializers.go @@ -9687,6 +9687,18 @@ func awsRestjson1_serializeDocumentAmazonElasticsearchParameters(v *types.Amazon return nil } +func awsRestjson1_serializeDocumentAmazonOpenSearchParameters(v *types.AmazonOpenSearchParameters, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Domain != nil { + ok := object.Key("Domain") + ok.String(*v.Domain) + } + + return nil +} + func awsRestjson1_serializeDocumentAnalysisSearchFilter(v *types.AnalysisSearchFilter, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -10344,6 +10356,12 @@ func awsRestjson1_serializeDocumentDataSourceParameters(v types.DataSourceParame return err } + case *types.DataSourceParametersMemberAmazonOpenSearchParameters: + av := object.Key("AmazonOpenSearchParameters") + if err := awsRestjson1_serializeDocumentAmazonOpenSearchParameters(&uv.Value, av); err != nil { + return err + } + case *types.DataSourceParametersMemberAthenaParameters: av := object.Key("AthenaParameters") if err := awsRestjson1_serializeDocumentAthenaParameters(&uv.Value, av); err != nil { diff --git a/service/quicksight/types/enums.go b/service/quicksight/types/enums.go index 734ac94839d..30cbcdb7fe0 100644 --- a/service/quicksight/types/enums.go +++ b/service/quicksight/types/enums.go @@ -273,6 +273,7 @@ const ( DataSourceTypeTeradata DataSourceType = "TERADATA" DataSourceTypeTwitter DataSourceType = "TWITTER" DataSourceTypeTimestream DataSourceType = "TIMESTREAM" + DataSourceTypeAmazonOpensearch DataSourceType = "AMAZON_OPENSEARCH" ) // Values returns all known values for 
DataSourceType. Note that this can be @@ -303,6 +304,7 @@ func (DataSourceType) Values() []DataSourceType { "TERADATA", "TWITTER", "TIMESTREAM", + "AMAZON_OPENSEARCH", } } diff --git a/service/quicksight/types/types.go b/service/quicksight/types/types.go index bb134015aa6..57629c6262b 100644 --- a/service/quicksight/types/types.go +++ b/service/quicksight/types/types.go @@ -72,6 +72,14 @@ type AmazonElasticsearchParameters struct { noSmithyDocumentSerde } +type AmazonOpenSearchParameters struct { + + // This member is required. + Domain *string + + noSmithyDocumentSerde +} + // Metadata structure for an analysis in Amazon QuickSight type Analysis struct { @@ -942,6 +950,7 @@ type DataSourceErrorInfo struct { // DataSourceParametersMemberSqlServerParameters // DataSourceParametersMemberTeradataParameters // DataSourceParametersMemberTwitterParameters +// DataSourceParametersMemberAmazonOpenSearchParameters type DataSourceParameters interface { isDataSourceParameters() } @@ -1126,6 +1135,14 @@ type DataSourceParametersMemberTwitterParameters struct { func (*DataSourceParametersMemberTwitterParameters) isDataSourceParameters() {} +type DataSourceParametersMemberAmazonOpenSearchParameters struct { + Value AmazonOpenSearchParameters + + noSmithyDocumentSerde +} + +func (*DataSourceParametersMemberAmazonOpenSearchParameters) isDataSourceParameters() {} + // A date-time parameter. type DateTimeParameter struct { diff --git a/service/quicksight/types/types_exported_test.go b/service/quicksight/types/types_exported_test.go index 087dd920393..8ca8529f75d 100644 --- a/service/quicksight/types/types_exported_test.go +++ b/service/quicksight/types/types_exported_test.go @@ -14,6 +14,9 @@ func ExampleDataSourceParameters_outputUsage() { case *types.DataSourceParametersMemberAmazonElasticsearchParameters: _ = v.Value // Value is types.AmazonElasticsearchParameters + case *types.DataSourceParametersMemberAmazonOpenSearchParameters: + _ = v.Value // Value is types.AmazonOpenSearchParameters + case *types.DataSourceParametersMemberAthenaParameters: _ = v.Value // Value is types.AthenaParameters @@ -83,6 +86,7 @@ func ExampleDataSourceParameters_outputUsage() { var _ *types.AmazonElasticsearchParameters var _ *types.MariaDbParameters var _ *types.RdsParameters +var _ *types.AmazonOpenSearchParameters var _ *types.RedshiftParameters var _ *types.OracleParameters var _ *types.JiraParameters diff --git a/service/quicksight/validators.go b/service/quicksight/validators.go index dc228825066..6f98063d505 100644 --- a/service/quicksight/validators.go +++ b/service/quicksight/validators.go @@ -2737,6 +2737,21 @@ func validateAmazonElasticsearchParameters(v *types.AmazonElasticsearchParameter } } +func validateAmazonOpenSearchParameters(v *types.AmazonOpenSearchParameters) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AmazonOpenSearchParameters"} + if v.Domain == nil { + invalidParams.Add(smithy.NewErrParamRequired("Domain")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateAnalysisSourceEntity(v *types.AnalysisSourceEntity) error { if v == nil { return nil @@ -3151,6 +3166,11 @@ func validateDataSourceParameters(v types.DataSourceParameters) error { invalidParams.AddNested("[AmazonElasticsearchParameters]", err.(smithy.InvalidParamsError)) } + case *types.DataSourceParametersMemberAmazonOpenSearchParameters: + if err := validateAmazonOpenSearchParameters(&uv.Value); err != nil { + 
invalidParams.AddNested("[AmazonOpenSearchParameters]", err.(smithy.InvalidParamsError)) + } + case *types.DataSourceParametersMemberAuroraParameters: if err := validateAuroraParameters(&uv.Value); err != nil { invalidParams.AddNested("[AuroraParameters]", err.(smithy.InvalidParamsError)) diff --git a/service/rds/api_op_ModifyCurrentDBClusterCapacity.go b/service/rds/api_op_ModifyCurrentDBClusterCapacity.go index b68af22e5f3..7bf097d2931 100644 --- a/service/rds/api_op_ModifyCurrentDBClusterCapacity.go +++ b/service/rds/api_op_ModifyCurrentDBClusterCapacity.go @@ -25,7 +25,8 @@ import ( // finding a scaling point might be dropped. For more information about scaling // points, see Autoscaling for Aurora Serverless // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html#aurora-serverless.how-it-works.auto-scaling) -// in the Amazon Aurora User Guide. This action only applies to Aurora DB clusters. +// in the Amazon Aurora User Guide. This action only applies to Aurora Serverless +// DB clusters. func (c *Client) ModifyCurrentDBClusterCapacity(ctx context.Context, params *ModifyCurrentDBClusterCapacityInput, optFns ...func(*Options)) (*ModifyCurrentDBClusterCapacityOutput, error) { if params == nil { params = &ModifyCurrentDBClusterCapacityInput{} @@ -64,9 +65,7 @@ type ModifyCurrentDBClusterCapacityInput struct { // The amount of time, in seconds, that Aurora Serverless tries to find a scaling // point to perform seamless scaling before enforcing the timeout action. The - // default is 300. - // - // * Value must be from 10 through 600. + // default is 300. Specify a value between 10 and 600 seconds. SecondsBeforeTimeout *int32 // The action to take when the timeout is reached, either ForceApplyCapacityChange diff --git a/service/rds/deserializers.go b/service/rds/deserializers.go index 937443ef5d6..5d29154837b 100644 --- a/service/rds/deserializers.go +++ b/service/rds/deserializers.go @@ -41347,6 +41347,23 @@ func awsAwsquery_deserializeDocumentScalingConfigurationInfo(v **types.ScalingCo sv.MinCapacity = ptr.Int32(int32(i64)) } + case strings.EqualFold("SecondsBeforeTimeout", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.SecondsBeforeTimeout = ptr.Int32(int32(i64)) + } + case strings.EqualFold("SecondsUntilAutoPause", t.Name.Local): val, err := decoder.Value() if err != nil { diff --git a/service/rds/serializers.go b/service/rds/serializers.go index 7bf4e4b7dac..2b2a47118d7 100644 --- a/service/rds/serializers.go +++ b/service/rds/serializers.go @@ -8143,6 +8143,11 @@ func awsAwsquery_serializeDocumentScalingConfiguration(v *types.ScalingConfigura objectKey.Integer(*v.MinCapacity) } + if v.SecondsBeforeTimeout != nil { + objectKey := object.Key("SecondsBeforeTimeout") + objectKey.Integer(*v.SecondsBeforeTimeout) + } + if v.SecondsUntilAutoPause != nil { objectKey := object.Key("SecondsUntilAutoPause") objectKey.Integer(*v.SecondsUntilAutoPause) diff --git a/service/rds/types/types.go b/service/rds/types/types.go index 75a56f1ce84..b8d7807781e 100644 --- a/service/rds/types/types.go +++ b/service/rds/types/types.go @@ -3185,6 +3185,11 @@ type ScalingConfiguration struct { // 384. The minimum capacity must be less than or equal to the maximum capacity. 
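// Illustrative sketch of the new SecondsBeforeTimeout knob on the RDS
// ModifyCurrentDBClusterCapacity operation documented above: a minimal call
// against an Aurora Serverless v1 cluster. The cluster identifier is a
// placeholder; the same setting is also available on ScalingConfiguration for
// ModifyDBCluster, as the serializer change above shows.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/rds"
)

// scaleServerlessCluster asks Aurora Serverless to move to the given capacity,
// waiting up to SecondsBeforeTimeout for a scaling point before applying the
// TimeoutAction.
func scaleServerlessCluster(ctx context.Context, client *rds.Client, capacity int32) error {
	_, err := client.ModifyCurrentDBClusterCapacity(ctx, &rds.ModifyCurrentDBClusterCapacityInput{
		DBClusterIdentifier:  aws.String("my-serverless-cluster"), // placeholder
		Capacity:             aws.Int32(capacity),
		SecondsBeforeTimeout: aws.Int32(120), // 10-600 seconds per the documentation above
		TimeoutAction:        aws.String("ForceApplyCapacityChange"),
	})
	return err
}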
MinCapacity *int32 + // The amount of time, in seconds, that Aurora Serverless tries to find a scaling + // point to perform seamless scaling before enforcing the timeout action. The + // default is 300. Specify a value between 60 and 600 seconds. + SecondsBeforeTimeout *int32 + // The time, in seconds, before an Aurora DB cluster in serverless mode is paused. // Specify a value between 300 and 86,400 seconds. SecondsUntilAutoPause *int32 @@ -3220,13 +3225,21 @@ type ScalingConfigurationInfo struct { // The maximum capacity for the Aurora DB cluster in serverless DB engine mode. MinCapacity *int32 + // The number of seconds before scaling times out. What happens when an attempted + // scaling action times out is determined by the TimeoutAction setting. + SecondsBeforeTimeout *int32 + // The remaining amount of time, in seconds, before the Aurora DB cluster in // serverless mode is paused. A DB cluster can be paused only when it's idle (it // has no connections). SecondsUntilAutoPause *int32 - // The timeout action of a call to ModifyCurrentDBClusterCapacity, either - // ForceApplyCapacityChange or RollbackCapacityChange. + // The action that occurs when Aurora times out while attempting to change the + // capacity of an Aurora Serverless cluster. The value is either + // ForceApplyCapacityChange or RollbackCapacityChange. ForceApplyCapacityChange, + // the default, sets the capacity to the specified value as soon as possible. + // RollbackCapacityChange ignores the capacity change if a scaling point isn't + // found in the timeout period. TimeoutAction *string noSmithyDocumentSerde diff --git a/service/robomaker/api_op_CreateRobotApplication.go b/service/robomaker/api_op_CreateRobotApplication.go index 3382cd3ab5a..d362c9d2360 100644 --- a/service/robomaker/api_op_CreateRobotApplication.go +++ b/service/robomaker/api_op_CreateRobotApplication.go @@ -40,9 +40,11 @@ type CreateRobotApplicationInput struct { // This member is required. RobotSoftwareSuite *types.RobotSoftwareSuite + // The object that contains that URI of the Docker image that you use for your + // robot application. + Environment *types.Environment + // The sources of the robot application. - // - // This member is required. Sources []types.SourceConfig // A map that contains tag keys and tag values that are attached to the robot @@ -57,6 +59,10 @@ type CreateRobotApplicationOutput struct { // The Amazon Resource Name (ARN) of the robot application. Arn *string + // An object that contains the Docker image URI used to a create your robot + // application. + Environment *types.Environment + // The time, in milliseconds since the epoch, when the robot application was last // updated. LastUpdatedAt *time.Time diff --git a/service/robomaker/api_op_CreateRobotApplicationVersion.go b/service/robomaker/api_op_CreateRobotApplicationVersion.go index 8ee1912cca4..1945fd2a8a7 100644 --- a/service/robomaker/api_op_CreateRobotApplicationVersion.go +++ b/service/robomaker/api_op_CreateRobotApplicationVersion.go @@ -39,6 +39,14 @@ type CreateRobotApplicationVersionInput struct { // matches the latest revision ID, a new version will be created. CurrentRevisionId *string + // A SHA256 identifier for the Docker image that you use for your robot + // application. + ImageDigest *string + + // The Amazon S3 identifier for the zip file bundle that you use for your robot + // application. 
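// Illustrative sketch for the QuickSight change earlier in this diff:
// registering an Amazon OpenSearch Service domain as a data source through the
// new DataSourceParametersMemberAmazonOpenSearchParameters union member and
// the AMAZON_OPENSEARCH data source type. Account ID, data source ID and
// domain name are placeholders.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/quicksight"
	"github.com/aws/aws-sdk-go-v2/service/quicksight/types"
)

func createOpenSearchDataSource(ctx context.Context, client *quicksight.Client) error {
	_, err := client.CreateDataSource(ctx, &quicksight.CreateDataSourceInput{
		AwsAccountId: aws.String("111122223333"),    // placeholder
		DataSourceId: aws.String("opensearch-logs"), // placeholder
		Name:         aws.String("OpenSearch logs"),
		Type:         types.DataSourceTypeAmazonOpensearch,
		DataSourceParameters: &types.DataSourceParametersMemberAmazonOpenSearchParameters{
			Value: types.AmazonOpenSearchParameters{
				Domain: aws.String("my-opensearch-domain"), // Domain is the only, required, member
			},
		},
	})
	return err
}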
+ S3Etags []string + noSmithyDocumentSerde } @@ -47,6 +55,10 @@ type CreateRobotApplicationVersionOutput struct { // The Amazon Resource Name (ARN) of the robot application. Arn *string + // The object that contains the Docker image URI used to create your robot + // application. + Environment *types.Environment + // The time, in milliseconds since the epoch, when the robot application was last // updated. LastUpdatedAt *time.Time diff --git a/service/robomaker/api_op_CreateSimulationApplication.go b/service/robomaker/api_op_CreateSimulationApplication.go index 1e7e7ceb073..4ed14a9e4b7 100644 --- a/service/robomaker/api_op_CreateSimulationApplication.go +++ b/service/robomaker/api_op_CreateSimulationApplication.go @@ -45,14 +45,16 @@ type CreateSimulationApplicationInput struct { // This member is required. SimulationSoftwareSuite *types.SimulationSoftwareSuite - // The sources of the simulation application. - // - // This member is required. - Sources []types.SourceConfig + // The object that contains the Docker image URI used to create your simulation + // application. + Environment *types.Environment // The rendering engine for the simulation application. RenderingEngine *types.RenderingEngine + // The sources of the simulation application. + Sources []types.SourceConfig + // A map that contains tag keys and tag values that are attached to the simulation // application. Tags map[string]string @@ -65,6 +67,10 @@ type CreateSimulationApplicationOutput struct { // The Amazon Resource Name (ARN) of the simulation application. Arn *string + // The object that contains the Docker image URI that you used to create your + // simulation application. + Environment *types.Environment + // The time, in milliseconds since the epoch, when the simulation application was // last updated. LastUpdatedAt *time.Time diff --git a/service/robomaker/api_op_CreateSimulationApplicationVersion.go b/service/robomaker/api_op_CreateSimulationApplicationVersion.go index 3cf587b801d..6e6de6e7083 100644 --- a/service/robomaker/api_op_CreateSimulationApplicationVersion.go +++ b/service/robomaker/api_op_CreateSimulationApplicationVersion.go @@ -39,6 +39,14 @@ type CreateSimulationApplicationVersionInput struct { // and it matches the latest revision ID, a new version will be created. CurrentRevisionId *string + // The SHA256 digest used to identify the Docker image URI used to created the + // simulation application. + ImageDigest *string + + // The Amazon S3 eTag identifier for the zip file bundle that you use to create the + // simulation application. + S3Etags []string + noSmithyDocumentSerde } @@ -47,6 +55,10 @@ type CreateSimulationApplicationVersionOutput struct { // The Amazon Resource Name (ARN) of the simulation application. Arn *string + // The object that contains the Docker image URI used to create the simulation + // application. + Environment *types.Environment + // The time, in milliseconds since the epoch, when the simulation application was // last updated. LastUpdatedAt *time.Time diff --git a/service/robomaker/api_op_DescribeRobotApplication.go b/service/robomaker/api_op_DescribeRobotApplication.go index 9ebb05ab3d2..879c06699d7 100644 --- a/service/robomaker/api_op_DescribeRobotApplication.go +++ b/service/robomaker/api_op_DescribeRobotApplication.go @@ -46,6 +46,14 @@ type DescribeRobotApplicationOutput struct { // The Amazon Resource Name (ARN) of the robot application. Arn *string + // The object that contains the Docker image URI used to create the robot + // application. 
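// Illustrative sketch of the RoboMaker container-image support added above:
// creating a robot application from a Docker image URI via the new Environment
// member instead of the now-optional Sources. The ECR image URI is a
// placeholder, and the "General" robot software suite value is an assumption
// that does not appear in this diff.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/robomaker"
	"github.com/aws/aws-sdk-go-v2/service/robomaker/types"
)

func createContainerRobotApp(ctx context.Context, client *robomaker.Client) (string, error) {
	out, err := client.CreateRobotApplication(ctx, &robomaker.CreateRobotApplicationInput{
		Name: aws.String("my-robot-app"),
		RobotSoftwareSuite: &types.RobotSoftwareSuite{
			Name: types.RobotSoftwareSuiteType("General"), // assumed value
		},
		Environment: &types.Environment{
			Uri: aws.String("111122223333.dkr.ecr.us-west-2.amazonaws.com/my-robot-app:latest"), // placeholder image
		},
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.Arn), nil
}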
+ Environment *types.Environment + + // A SHA256 identifier for the Docker image that you use for your robot + // application. + ImageDigest *string + // The time, in milliseconds since the epoch, when the robot application was last // updated. LastUpdatedAt *time.Time diff --git a/service/robomaker/api_op_DescribeSimulationApplication.go b/service/robomaker/api_op_DescribeSimulationApplication.go index daa880b6d40..ed0847b4f57 100644 --- a/service/robomaker/api_op_DescribeSimulationApplication.go +++ b/service/robomaker/api_op_DescribeSimulationApplication.go @@ -46,6 +46,14 @@ type DescribeSimulationApplicationOutput struct { // The Amazon Resource Name (ARN) of the robot simulation application. Arn *string + // The object that contains the Docker image URI used to create the simulation + // application. + Environment *types.Environment + + // A SHA256 identifier for the Docker image that you use for your simulation + // application. + ImageDigest *string + // The time, in milliseconds since the epoch, when the simulation application was // last updated. LastUpdatedAt *time.Time diff --git a/service/robomaker/api_op_UpdateRobotApplication.go b/service/robomaker/api_op_UpdateRobotApplication.go index 8552649fc27..0df2f3df649 100644 --- a/service/robomaker/api_op_UpdateRobotApplication.go +++ b/service/robomaker/api_op_UpdateRobotApplication.go @@ -40,14 +40,15 @@ type UpdateRobotApplicationInput struct { // This member is required. RobotSoftwareSuite *types.RobotSoftwareSuite - // The sources of the robot application. - // - // This member is required. - Sources []types.SourceConfig - // The revision id for the robot application. CurrentRevisionId *string + // The object that contains the Docker image URI for your robot application. + Environment *types.Environment + + // The sources of the robot application. + Sources []types.SourceConfig + noSmithyDocumentSerde } @@ -56,6 +57,9 @@ type UpdateRobotApplicationOutput struct { // The Amazon Resource Name (ARN) of the updated robot application. Arn *string + // The object that contains the Docker image URI for your robot application. + Environment *types.Environment + // The time, in milliseconds since the epoch, when the robot application was last // updated. LastUpdatedAt *time.Time diff --git a/service/robomaker/api_op_UpdateSimulationApplication.go b/service/robomaker/api_op_UpdateSimulationApplication.go index a3b6303d257..b7dd4d7c971 100644 --- a/service/robomaker/api_op_UpdateSimulationApplication.go +++ b/service/robomaker/api_op_UpdateSimulationApplication.go @@ -45,17 +45,18 @@ type UpdateSimulationApplicationInput struct { // This member is required. SimulationSoftwareSuite *types.SimulationSoftwareSuite - // The sources of the simulation application. - // - // This member is required. - Sources []types.SourceConfig - // The revision id for the robot application. CurrentRevisionId *string + // The object that contains the Docker image URI for your simulation application. + Environment *types.Environment + // The rendering engine for the simulation application. RenderingEngine *types.RenderingEngine + // The sources of the simulation application. + Sources []types.SourceConfig + noSmithyDocumentSerde } @@ -64,6 +65,10 @@ type UpdateSimulationApplicationOutput struct { // The Amazon Resource Name (ARN) of the updated simulation application. Arn *string + // The object that contains the Docker image URI used for your simulation + // application. 
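// Illustrative sketch continuing from the RoboMaker snippet above (same
// package): pinning an application version to a specific image with the new
// ImageDigest member, then reading Environment and ImageDigest back from
// DescribeRobotApplication. The digest value is a placeholder; S3Etags plays
// the equivalent role for zip-bundle based applications.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/robomaker"
)

func versionAndDescribe(ctx context.Context, client *robomaker.Client, appArn, digest string) error {
	if _, err := client.CreateRobotApplicationVersion(ctx, &robomaker.CreateRobotApplicationVersionInput{
		Application: aws.String(appArn),
		ImageDigest: aws.String(digest), // e.g. "sha256:..." (placeholder)
	}); err != nil {
		return err
	}

	out, err := client.DescribeRobotApplication(ctx, &robomaker.DescribeRobotApplicationInput{
		Application: aws.String(appArn),
	})
	if err != nil {
		return err
	}
	if out.Environment != nil {
		_ = aws.ToString(out.Environment.Uri) // image URI the application was created from
	}
	_ = aws.ToString(out.ImageDigest) // SHA256 digest of that image
	return nil
}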
+ Environment *types.Environment + // The time, in milliseconds since the epoch, when the simulation application was // last updated. LastUpdatedAt *time.Time diff --git a/service/robomaker/deserializers.go b/service/robomaker/deserializers.go index e8717f3d49e..d130ea6337d 100644 --- a/service/robomaker/deserializers.go +++ b/service/robomaker/deserializers.go @@ -1588,6 +1588,11 @@ func awsRestjson1_deserializeOpDocumentCreateRobotApplicationOutput(v **CreateRo sv.Arn = ptr.String(jtv) } + case "environment": + if err := awsRestjson1_deserializeDocumentEnvironment(&sv.Environment, value); err != nil { + return err + } + case "lastUpdatedAt": if value != nil { switch jtv := value.(type) { @@ -1809,6 +1814,11 @@ func awsRestjson1_deserializeOpDocumentCreateRobotApplicationVersionOutput(v **C sv.Arn = ptr.String(jtv) } + case "environment": + if err := awsRestjson1_deserializeDocumentEnvironment(&sv.Environment, value); err != nil { + return err + } + case "lastUpdatedAt": if value != nil { switch jtv := value.(type) { @@ -2028,6 +2038,11 @@ func awsRestjson1_deserializeOpDocumentCreateSimulationApplicationOutput(v **Cre sv.Arn = ptr.String(jtv) } + case "environment": + if err := awsRestjson1_deserializeDocumentEnvironment(&sv.Environment, value); err != nil { + return err + } + case "lastUpdatedAt": if value != nil { switch jtv := value.(type) { @@ -2259,6 +2274,11 @@ func awsRestjson1_deserializeOpDocumentCreateSimulationApplicationVersionOutput( sv.Arn = ptr.String(jtv) } + case "environment": + if err := awsRestjson1_deserializeDocumentEnvironment(&sv.Environment, value); err != nil { + return err + } + case "lastUpdatedAt": if value != nil { switch jtv := value.(type) { @@ -4792,6 +4812,20 @@ func awsRestjson1_deserializeOpDocumentDescribeRobotApplicationOutput(v **Descri sv.Arn = ptr.String(jtv) } + case "environment": + if err := awsRestjson1_deserializeDocumentEnvironment(&sv.Environment, value); err != nil { + return err + } + + case "imageDigest": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageDigest to be of type string, got %T instead", value) + } + sv.ImageDigest = ptr.String(jtv) + } + case "lastUpdatedAt": if value != nil { switch jtv := value.(type) { @@ -5010,6 +5044,20 @@ func awsRestjson1_deserializeOpDocumentDescribeSimulationApplicationOutput(v **D sv.Arn = ptr.String(jtv) } + case "environment": + if err := awsRestjson1_deserializeDocumentEnvironment(&sv.Environment, value); err != nil { + return err + } + + case "imageDigest": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ImageDigest to be of type string, got %T instead", value) + } + sv.ImageDigest = ptr.String(jtv) + } + case "lastUpdatedAt": if value != nil { switch jtv := value.(type) { @@ -9752,6 +9800,11 @@ func awsRestjson1_deserializeOpDocumentUpdateRobotApplicationOutput(v **UpdateRo sv.Arn = ptr.String(jtv) } + case "environment": + if err := awsRestjson1_deserializeDocumentEnvironment(&sv.Environment, value); err != nil { + return err + } + case "lastUpdatedAt": if value != nil { switch jtv := value.(type) { @@ -9968,6 +10021,11 @@ func awsRestjson1_deserializeOpDocumentUpdateSimulationApplicationOutput(v **Upd sv.Arn = ptr.String(jtv) } + case "environment": + if err := awsRestjson1_deserializeDocumentEnvironment(&sv.Environment, value); err != nil { + return err + } + case "lastUpdatedAt": if value != nil { switch jtv := value.(type) { @@ -11403,6 +11461,46 @@ func 
awsRestjson1_deserializeDocumentDeploymentLaunchConfig(v **types.Deployment return nil } +func awsRestjson1_deserializeDocumentEnvironment(v **types.Environment, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Environment + if *v == nil { + sv = &types.Environment{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "uri": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RepositoryUrl to be of type string, got %T instead", value) + } + sv.Uri = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentEnvironmentVariableMap(v *map[string]string, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/robomaker/serializers.go b/service/robomaker/serializers.go index ec788fc8366..962ef2e1d43 100644 --- a/service/robomaker/serializers.go +++ b/service/robomaker/serializers.go @@ -808,6 +808,13 @@ func awsRestjson1_serializeOpDocumentCreateRobotApplicationInput(v *CreateRobotA object := value.Object() defer object.Close() + if v.Environment != nil { + ok := object.Key("environment") + if err := awsRestjson1_serializeDocumentEnvironment(v.Environment, ok); err != nil { + return err + } + } + if v.Name != nil { ok := object.Key("name") ok.String(*v.Name) @@ -907,6 +914,18 @@ func awsRestjson1_serializeOpDocumentCreateRobotApplicationVersionInput(v *Creat ok.String(*v.CurrentRevisionId) } + if v.ImageDigest != nil { + ok := object.Key("imageDigest") + ok.String(*v.ImageDigest) + } + + if v.S3Etags != nil { + ok := object.Key("s3Etags") + if err := awsRestjson1_serializeDocumentS3Etags(v.S3Etags, ok); err != nil { + return err + } + } + return nil } @@ -970,6 +989,13 @@ func awsRestjson1_serializeOpDocumentCreateSimulationApplicationInput(v *CreateS object := value.Object() defer object.Close() + if v.Environment != nil { + ok := object.Key("environment") + if err := awsRestjson1_serializeDocumentEnvironment(v.Environment, ok); err != nil { + return err + } + } + if v.Name != nil { ok := object.Key("name") ok.String(*v.Name) @@ -1083,6 +1109,18 @@ func awsRestjson1_serializeOpDocumentCreateSimulationApplicationVersionInput(v * ok.String(*v.CurrentRevisionId) } + if v.ImageDigest != nil { + ok := object.Key("imageDigest") + ok.String(*v.ImageDigest) + } + + if v.S3Etags != nil { + ok := object.Key("s3Etags") + if err := awsRestjson1_serializeDocumentS3Etags(v.S3Etags, ok); err != nil { + return err + } + } + return nil } @@ -4220,6 +4258,13 @@ func awsRestjson1_serializeOpDocumentUpdateRobotApplicationInput(v *UpdateRobotA ok.String(*v.CurrentRevisionId) } + if v.Environment != nil { + ok := object.Key("environment") + if err := awsRestjson1_serializeDocumentEnvironment(v.Environment, ok); err != nil { + return err + } + } + if v.RobotSoftwareSuite != nil { ok := object.Key("robotSoftwareSuite") if err := awsRestjson1_serializeDocumentRobotSoftwareSuite(v.RobotSoftwareSuite, ok); err != nil { @@ -4307,6 +4352,13 @@ func awsRestjson1_serializeOpDocumentUpdateSimulationApplicationInput(v *UpdateS ok.String(*v.CurrentRevisionId) } + if v.Environment != nil { + ok := object.Key("environment") + if err := awsRestjson1_serializeDocumentEnvironment(v.Environment, ok); err != nil { + return 
err + } + } + if v.RenderingEngine != nil { ok := object.Key("renderingEngine") if err := awsRestjson1_serializeDocumentRenderingEngine(v.RenderingEngine, ok); err != nil { @@ -4613,6 +4665,18 @@ func awsRestjson1_serializeDocumentDeploymentLaunchConfig(v *types.DeploymentLau return nil } +func awsRestjson1_serializeDocumentEnvironment(v *types.Environment, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Uri != nil { + ok := object.Key("uri") + ok.String(*v.Uri) + } + + return nil +} + func awsRestjson1_serializeDocumentEnvironmentVariableMap(v map[string]string, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -4876,6 +4940,17 @@ func awsRestjson1_serializeDocumentRobotSoftwareSuite(v *types.RobotSoftwareSuit return nil } +func awsRestjson1_serializeDocumentS3Etags(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + func awsRestjson1_serializeDocumentS3Keys(v []string, value smithyjson.Value) error { array := value.Array() defer array.Close() diff --git a/service/robomaker/types/types.go b/service/robomaker/types/types.go index df4453eefd2..84e59ade1ea 100644 --- a/service/robomaker/types/types.go +++ b/service/robomaker/types/types.go @@ -185,6 +185,16 @@ type DeploymentLaunchConfig struct { noSmithyDocumentSerde } +// The object that contains the Docker image URI for either your robot or +// simulation applications. +type Environment struct { + + // The Docker image URI for either your robot or simulation applications. + Uri *string + + noSmithyDocumentSerde +} + // Information about a failed create simulation job request. type FailedCreateSimulationJobRequest struct { diff --git a/service/robomaker/validators.go b/service/robomaker/validators.go index 0066a8fde7d..b056ab4ebac 100644 --- a/service/robomaker/validators.go +++ b/service/robomaker/validators.go @@ -1723,9 +1723,6 @@ func validateOpCreateRobotApplicationInput(v *CreateRobotApplicationInput) error if v.Name == nil { invalidParams.Add(smithy.NewErrParamRequired("Name")) } - if v.Sources == nil { - invalidParams.Add(smithy.NewErrParamRequired("Sources")) - } if v.RobotSoftwareSuite == nil { invalidParams.Add(smithy.NewErrParamRequired("RobotSoftwareSuite")) } @@ -1780,9 +1777,6 @@ func validateOpCreateSimulationApplicationInput(v *CreateSimulationApplicationIn if v.Name == nil { invalidParams.Add(smithy.NewErrParamRequired("Name")) } - if v.Sources == nil { - invalidParams.Add(smithy.NewErrParamRequired("Sources")) - } if v.SimulationSoftwareSuite == nil { invalidParams.Add(smithy.NewErrParamRequired("SimulationSoftwareSuite")) } @@ -2294,9 +2288,6 @@ func validateOpUpdateRobotApplicationInput(v *UpdateRobotApplicationInput) error if v.Application == nil { invalidParams.Add(smithy.NewErrParamRequired("Application")) } - if v.Sources == nil { - invalidParams.Add(smithy.NewErrParamRequired("Sources")) - } if v.RobotSoftwareSuite == nil { invalidParams.Add(smithy.NewErrParamRequired("RobotSoftwareSuite")) } @@ -2315,9 +2306,6 @@ func validateOpUpdateSimulationApplicationInput(v *UpdateSimulationApplicationIn if v.Application == nil { invalidParams.Add(smithy.NewErrParamRequired("Application")) } - if v.Sources == nil { - invalidParams.Add(smithy.NewErrParamRequired("Sources")) - } if v.SimulationSoftwareSuite == nil { invalidParams.Add(smithy.NewErrParamRequired("SimulationSoftwareSuite")) } diff --git 
a/service/s3/api_op_CompleteMultipartUpload.go b/service/s3/api_op_CompleteMultipartUpload.go index 993572f722d..dbee35ec5d1 100644 --- a/service/s3/api_op_CompleteMultipartUpload.go +++ b/service/s3/api_op_CompleteMultipartUpload.go @@ -210,14 +210,14 @@ type CompleteMultipartUploadOutput struct { RequestCharged types.RequestCharged // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) - // that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. SSEKMSKeyId *string // If you specified server-side encryption either with an Amazon S3-managed - // encryption key or an Amazon Web Services KMS customer master key (CMK) in your - // initiate multipart upload request, the response includes this header. It - // confirms the encryption algorithm that Amazon S3 used to encrypt the object. + // encryption key or an Amazon Web Services KMS key in your initiate multipart + // upload request, the response includes this header. It confirms the encryption + // algorithm that Amazon S3 used to encrypt the object. ServerSideEncryption types.ServerSideEncryption // Version ID of the newly created object, in case the bucket has versioning turned diff --git a/service/s3/api_op_CopyObject.go b/service/s3/api_op_CopyObject.go index 8196b76a93a..e84472d8931 100644 --- a/service/s3/api_op_CopyObject.go +++ b/service/s3/api_op_CopyObject.go @@ -423,8 +423,8 @@ type CopyObjectOutput struct { SSEKMSEncryptionContext *string // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) - // that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 diff --git a/service/s3/api_op_CreateMultipartUpload.go b/service/s3/api_op_CreateMultipartUpload.go index 92d25bdf44b..ecfa09c9e19 100644 --- a/service/s3/api_op_CreateMultipartUpload.go +++ b/service/s3/api_op_CreateMultipartUpload.go @@ -45,27 +45,26 @@ import ( // multipart upload. You can optionally request server-side encryption. For // server-side encryption, Amazon S3 encrypts your data as it writes it to disks in // its data centers and decrypts it when you access it. You can provide your own -// encryption key, or use Amazon Web Services Key Management Service (Amazon Web -// Services KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. -// If you choose to provide your own encryption key, the request headers you -// provide in UploadPart +// encryption key, or use Amazon Web Services KMS keys or Amazon S3-managed +// encryption keys. If you choose to provide your own encryption key, the request +// headers you provide in UploadPart // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) and // UploadPartCopy // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) // requests must match the headers you used in the request to initiate the upload // by using CreateMultipartUpload. To perform a multipart upload with encryption -// using an Amazon Web Services KMS CMK, the requester must have permission to the +// using an Amazon Web Services KMS key, the requester must have permission to the // kms:Decrypt and kms:GenerateDataKey* actions on the key. 
These permissions are // required because Amazon S3 must decrypt and read data from the encrypted file // parts before it completes the multipart upload. For more information, see // Multipart upload API and permissions // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) // in the Amazon S3 User Guide. If your Identity and Access Management (IAM) user -// or role is in the same Amazon Web Services account as the Amazon Web Services -// KMS CMK, then you must have these permissions on the key policy. If your IAM -// user or role belongs to a different account than the key, then you must have the -// permissions on both the key policy and your IAM user or role. For more -// information, see Protecting Data Using Server-Side Encryption +// or role is in the same Amazon Web Services account as the KMS key, then you must +// have these permissions on the key policy. If your IAM user or role belongs to a +// different account than the key, then you must have the permissions on both the +// key policy and your IAM user or role. For more information, see Protecting Data +// Using Server-Side Encryption // (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). // Access Permissions When copying an object, you can optionally specify the // accounts or groups that should be granted specific permissions on the new @@ -93,10 +92,10 @@ import ( // encryption keys or provide your own encryption key. // // * Use encryption keys -// managed by Amazon S3 or customer master keys (CMKs) stored in Amazon Web -// Services Key Management Service (Amazon Web Services KMS) – If you want Amazon -// Web Services to manage the keys used to encrypt data, specify the following -// headers in the request. +// managed by Amazon S3 or customer managed key stored in Amazon Web Services Key +// Management Service (Amazon Web Services KMS) – If you want Amazon Web Services +// to manage the keys used to encrypt data, specify the following headers in the +// request. // // * x-amz-server-side-encryption // @@ -109,12 +108,11 @@ import ( // If you specify // x-amz-server-side-encryption:aws:kms, but don't provide // x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web -// Services managed CMK in Amazon Web Services KMS to protect the data. All GET and +// Services managed key in Amazon Web Services KMS to protect the data. All GET and // PUT requests for an object protected by Amazon Web Services KMS fail if you // don't make them with SSL or by using SigV4. For more information about -// server-side encryption with CMKs stored in Amazon Web Services KMS (SSE-KMS), -// see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon Web -// Services KMS +// server-side encryption with KMS key (SSE-KMS), see Protecting Data Using +// Server-Side Encryption with KMS keys // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). // // * @@ -131,9 +129,8 @@ import ( // x-amz-server-side-encryption-customer-key-MD5 // // For more information about -// server-side encryption with CMKs stored in Amazon Web Services KMS (SSE-KMS), -// see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon Web -// Services KMS +// server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using +// Server-Side Encryption with KMS keys // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). 
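// Illustrative sketch of the SSE-KMS wording above: starting a multipart
// upload encrypted with a customer managed KMS key. Bucket, key and KMS key ID
// are placeholders; as described above, the caller also needs kms:Decrypt and
// kms:GenerateDataKey* on the key, and omitting SSEKMSKeyId falls back to the
// Amazon Web Services managed key.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func startKMSEncryptedUpload(ctx context.Context, client *s3.Client) (string, error) {
	out, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket:               aws.String("my-bucket"), // placeholder
		Key:                  aws.String("backups/archive.tar"),
		ServerSideEncryption: types.ServerSideEncryptionAwsKms, // x-amz-server-side-encryption: aws:kms
		SSEKMSKeyId:          aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder key ID
	})
	if err != nil {
		return "", err
	}
	// Subsequent UploadPart calls reference out.UploadId; the response also
	// echoes SSEKMSKeyId and the encryption algorithm.
	return aws.ToString(out.UploadId), nil
}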
// // Access-Control-List @@ -371,12 +368,12 @@ type CreateMultipartUploadInput struct { // JSON with the encryption context key-value pairs. SSEKMSEncryptionContext *string - // Specifies the ID of the symmetric customer managed Amazon Web Services KMS CMK - // to use for object encryption. All GET and PUT requests for an object protected - // by Amazon Web Services KMS will fail if not made via SSL or using SigV4. For - // information about configuring using any of the officially supported Amazon Web - // Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version - // in Request Authentication + // Specifies the ID of the symmetric customer managed key to use for object + // encryption. All GET and PUT requests for an object protected by Amazon Web + // Services KMS will fail if not made via SSL or using SigV4. For information about + // configuring using any of the officially supported Amazon Web Services SDKs and + // Amazon Web Services CLI, see Specifying the Signature Version in Request + // Authentication // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 User Guide. SSEKMSKeyId *string @@ -469,8 +466,8 @@ type CreateMultipartUploadOutput struct { SSEKMSEncryptionContext *string // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) - // that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 diff --git a/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go b/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go index 7af33b45ec6..4e6d6e374ec 100644 --- a/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go +++ b/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go @@ -14,16 +14,17 @@ import ( // Deletes the S3 Intelligent-Tiering configuration from the specified bucket. The // S3 Intelligent-Tiering storage class is designed to optimize storage costs by // automatically moving data to the most cost-effective storage access tier, -// without additional operational overhead. S3 Intelligent-Tiering delivers -// automatic cost savings by moving data between access tiers, when access patterns -// change. The S3 Intelligent-Tiering storage class is suitable for objects larger -// than 128 KB that you plan to store for at least 30 days. If the size of an -// object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects -// can be stored, but they are always charged at the frequent access tier rates in -// the S3 Intelligent-Tiering storage class. If you delete an object before the end -// of the 30-day minimum storage duration period, you are charged for 30 days. For -// more information, see Storage class for automatically optimizing frequently and -// infrequently accessed objects +// without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in two low latency and high throughput access +// tiers. For data that can be accessed asynchronously, you can choose to activate +// automatic archiving capabilities within the S3 Intelligent-Tiering storage +// class. 
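// Illustrative sketch, same package and imports as the S3 snippet above:
// fetching one S3 Intelligent-Tiering configuration by its ID, per the
// GetBucketIntelligentTieringConfiguration documentation nearby. Bucket and
// configuration ID are placeholders.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func getTieringConfig(ctx context.Context, client *s3.Client) (*types.IntelligentTieringConfiguration, error) {
	out, err := client.GetBucketIntelligentTieringConfiguration(ctx, &s3.GetBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String("my-bucket"),      // placeholder
		Id:     aws.String("archive-config"), // placeholder configuration ID
	})
	if err != nil {
		return nil, err
	}
	return out.IntelligentTieringConfiguration, nil
}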
The S3 Intelligent-Tiering storage class is the ideal storage class for +// data with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 KB, +// it is not eligible for auto-tiering. Smaller objects can be stored, but they are +// always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering +// storage class. For more information, see Storage class for automatically +// optimizing frequently and infrequently accessed objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). // Operations related to DeleteBucketIntelligentTieringConfiguration include: // diff --git a/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go b/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go index 9d96a1a8bbe..82e731ea988 100644 --- a/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go +++ b/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go @@ -15,16 +15,17 @@ import ( // Gets the S3 Intelligent-Tiering configuration from the specified bucket. The S3 // Intelligent-Tiering storage class is designed to optimize storage costs by // automatically moving data to the most cost-effective storage access tier, -// without additional operational overhead. S3 Intelligent-Tiering delivers -// automatic cost savings by moving data between access tiers, when access patterns -// change. The S3 Intelligent-Tiering storage class is suitable for objects larger -// than 128 KB that you plan to store for at least 30 days. If the size of an -// object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects -// can be stored, but they are always charged at the frequent access tier rates in -// the S3 Intelligent-Tiering storage class. If you delete an object before the end -// of the 30-day minimum storage duration period, you are charged for 30 days. For -// more information, see Storage class for automatically optimizing frequently and -// infrequently accessed objects +// without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in two low latency and high throughput access +// tiers. For data that can be accessed asynchronously, you can choose to activate +// automatic archiving capabilities within the S3 Intelligent-Tiering storage +// class. The S3 Intelligent-Tiering storage class is the ideal storage class for +// data with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 KB, +// it is not eligible for auto-tiering. Smaller objects can be stored, but they are +// always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering +// storage class. For more information, see Storage class for automatically +// optimizing frequently and infrequently accessed objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). // Operations related to GetBucketIntelligentTieringConfiguration include: // diff --git a/service/s3/api_op_GetObject.go b/service/s3/api_op_GetObject.go index 2784e083924..6b900c141d3 100644 --- a/service/s3/api_op_GetObject.go +++ b/service/s3/api_op_GetObject.go @@ -43,15 +43,15 @@ import ( // about restoring archived objects, see Restoring Archived Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). 
// Encryption request headers, like x-amz-server-side-encryption, should not be -// sent for GET requests if your object uses server-side encryption with CMKs -// stored in Amazon Web Services KMS (SSE-KMS) or server-side encryption with -// Amazon S3–managed encryption keys (SSE-S3). If your object does use these types -// of keys, you’ll get an HTTP 400 BadRequest error. If you encrypt an object by -// using server-side encryption with customer-provided encryption keys (SSE-C) when -// you store the object in Amazon S3, then when you GET the object, you must use -// the following headers: +// sent for GET requests if your object uses server-side encryption with KMS keys +// (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys +// (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 +// BadRequest error. If you encrypt an object by using server-side encryption with +// customer-provided encryption keys (SSE-C) when you store the object in Amazon +// S3, then when you GET the object, you must use the following headers: // -// * x-amz-server-side-encryption-customer-algorithm +// * +// x-amz-server-side-encryption-customer-algorithm // // * // x-amz-server-side-encryption-customer-key @@ -165,9 +165,11 @@ type GetObjectInput struct { // the access point ARN in place of the bucket name. For more information about // access point ARNs, see Using access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts + // in the Amazon S3 User Guide. When using an Object Lambda access point the // hostname takes the form + // AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. When using this + // action with Amazon S3 on Outposts, you must direct requests to the S3 on + // Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using // this action using S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts bucket ARN in place of the bucket name. For more @@ -364,8 +366,8 @@ type GetObjectOutput struct { SSECustomerKeyMD5 *string // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) - // that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 diff --git a/service/s3/api_op_HeadObject.go b/service/s3/api_op_HeadObject.go index e904a5fe414..a2f414713b2 100644 --- a/service/s3/api_op_HeadObject.go +++ b/service/s3/api_op_HeadObject.go @@ -44,16 +44,16 @@ import ( // // * // Encryption request headers, like x-amz-server-side-encryption, should not be -// sent for GET requests if your object uses server-side encryption with CMKs -// stored in Amazon Web Services KMS (SSE-KMS) or server-side encryption with -// Amazon S3–managed encryption keys (SSE-S3). If your object does use these types -// of keys, you’ll get an HTTP 400 BadRequest error. +// sent for GET requests if your object uses server-side encryption with KMS keys +// (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys +// (SSE-S3). 
If your object does use these types of keys, you’ll get an HTTP 400 +// BadRequest error. // -// * The last modified property -// in this case is the creation date of the object. +// * The last modified property in this case is the creation +// date of the object. // -// Request headers are limited to -// 8 KB in size. For more information, see Common Request Headers +// Request headers are limited to 8 KB in size. For more +// information, see Common Request Headers // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). // Consider the following when using request headers: // @@ -352,15 +352,14 @@ type HeadObjectOutput struct { SSECustomerKeyMD5 *string // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) - // that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. SSEKMSKeyId *string // If the object is stored using server-side encryption either with an Amazon Web - // Services KMS customer master key (CMK) or an Amazon S3-managed encryption key, - // the response includes this header with the value of the server-side encryption - // algorithm used when storing this object in Amazon S3 (for example, AES256, - // aws:kms). + // Services KMS key or an Amazon S3-managed encryption key, the response includes + // this header with the value of the server-side encryption algorithm used when + // storing this object in Amazon S3 (for example, AES256, aws:kms). ServerSideEncryption types.ServerSideEncryption // Provides storage class information of the object. Amazon S3 returns this header diff --git a/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go b/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go index fec55c34a43..9f830dfbc71 100644 --- a/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go +++ b/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go @@ -15,16 +15,17 @@ import ( // Lists the S3 Intelligent-Tiering configuration from the specified bucket. The S3 // Intelligent-Tiering storage class is designed to optimize storage costs by // automatically moving data to the most cost-effective storage access tier, -// without additional operational overhead. S3 Intelligent-Tiering delivers -// automatic cost savings by moving data between access tiers, when access patterns -// change. The S3 Intelligent-Tiering storage class is suitable for objects larger -// than 128 KB that you plan to store for at least 30 days. If the size of an -// object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects -// can be stored, but they are always charged at the frequent access tier rates in -// the S3 Intelligent-Tiering storage class. If you delete an object before the end -// of the 30-day minimum storage duration period, you are charged for 30 days. For -// more information, see Storage class for automatically optimizing frequently and -// infrequently accessed objects +// without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in two low latency and high throughput access +// tiers. For data that can be accessed asynchronously, you can choose to activate +// automatic archiving capabilities within the S3 Intelligent-Tiering storage +// class. 
The S3 Intelligent-Tiering storage class is the ideal storage class for +// data with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 KB, +// it is not eligible for auto-tiering. Smaller objects can be stored, but they are +// always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering +// storage class. For more information, see Storage class for automatically +// optimizing frequently and infrequently accessed objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). // Operations related to ListBucketIntelligentTieringConfigurations include: // diff --git a/service/s3/api_op_PutBucketEncryption.go b/service/s3/api_op_PutBucketEncryption.go index 66a585d8315..556b2f51a47 100644 --- a/service/s3/api_op_PutBucketEncryption.go +++ b/service/s3/api_op_PutBucketEncryption.go @@ -14,10 +14,10 @@ import ( // This action uses the encryption subresource to configure default encryption and // Amazon S3 Bucket Key for an existing bucket. Default encryption for a bucket can -// use server-side encryption with Amazon S3-managed keys (SSE-S3) or Amazon Web -// Services KMS customer master keys (SSE-KMS). If you specify default encryption -// using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information -// about default encryption, see Amazon S3 default bucket encryption +// use server-side encryption with Amazon S3-managed keys (SSE-S3) or customer +// managed keys (SSE-KMS). If you specify default encryption using SSE-KMS, you can +// also configure Amazon S3 Bucket Key. For information about default encryption, +// see Amazon S3 default bucket encryption // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in the // Amazon S3 User Guide. For more information about S3 Bucket Keys, see Amazon S3 // Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in @@ -59,9 +59,9 @@ func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncry type PutBucketEncryptionInput struct { // Specifies default encryption for a bucket using server-side encryption with - // Amazon S3-managed keys (SSE-S3) or customer master keys stored in Amazon Web - // Services KMS (SSE-KMS). For information about the Amazon S3 default encryption - // feature, see Amazon S3 Default Bucket Encryption + // Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). For + // information about the Amazon S3 default encryption feature, see Amazon S3 + // Default Bucket Encryption // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in the // Amazon S3 User Guide. // diff --git a/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go b/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go index 1e29af44c10..094817f1351 100644 --- a/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go +++ b/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go @@ -16,16 +16,17 @@ import ( // have up to 1,000 S3 Intelligent-Tiering configurations per bucket. The S3 // Intelligent-Tiering storage class is designed to optimize storage costs by // automatically moving data to the most cost-effective storage access tier, -// without additional operational overhead. S3 Intelligent-Tiering delivers -// automatic cost savings by moving data between access tiers, when access patterns -// change. 
The S3 Intelligent-Tiering storage class is suitable for objects larger -// than 128 KB that you plan to store for at least 30 days. If the size of an -// object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects -// can be stored, but they are always charged at the frequent access tier rates in -// the S3 Intelligent-Tiering storage class. If you delete an object before the end -// of the 30-day minimum storage duration period, you are charged for 30 days. For -// more information, see Storage class for automatically optimizing frequently and -// infrequently accessed objects +// without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in two low latency and high throughput access +// tiers. For data that can be accessed asynchronously, you can choose to activate +// automatic archiving capabilities within the S3 Intelligent-Tiering storage +// class. The S3 Intelligent-Tiering storage class is the ideal storage class for +// data with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 KB, +// it is not eligible for auto-tiering. Smaller objects can be stored, but they are +// always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering +// storage class. For more information, see Storage class for automatically +// optimizing frequently and infrequently accessed objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). // Operations related to PutBucketIntelligentTieringConfiguration include: // diff --git a/service/s3/api_op_PutBucketMetricsConfiguration.go b/service/s3/api_op_PutBucketMetricsConfiguration.go index b9725aa7f42..22a5da0ed10 100644 --- a/service/s3/api_op_PutBucketMetricsConfiguration.go +++ b/service/s3/api_op_PutBucketMetricsConfiguration.go @@ -34,8 +34,8 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) // // * -// PutBucketMetricsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// GetBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) // // * // ListBucketMetricsConfigurations diff --git a/service/s3/api_op_PutBucketReplication.go b/service/s3/api_op_PutBucketReplication.go index 0aff99944ef..d1378e733b0 100644 --- a/service/s3/api_op_PutBucketReplication.go +++ b/service/s3/api_op_PutBucketReplication.go @@ -34,12 +34,11 @@ import ( // For information about enabling versioning on a bucket, see Using Versioning // (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). Handling // Replication of Encrypted Objects By default, Amazon S3 doesn't replicate objects -// that are stored at rest using server-side encryption with CMKs stored in Amazon -// Web Services KMS. To replicate Amazon Web Services KMS-encrypted objects, add -// the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, +// that are stored at rest using server-side encryption with KMS keys. To replicate +// Amazon Web Services KMS-encrypted objects, add the following: +// SourceSelectionCriteria, SseKmsEncryptedObjects, Status, // EncryptionConfiguration, and ReplicaKmsKeyID. 
For information about replication -// configuration, see Replicating Objects Created with SSE Using CMKs stored in -// Amazon Web Services KMS +// configuration, see Replicating Objects Created with SSE Using KMS keys // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). // For information on PutBucketReplication errors, see List of replication-related // error codes diff --git a/service/s3/api_op_PutObject.go b/service/s3/api_op_PutObject.go index f43aba1d79c..2e94e7eb537 100644 --- a/service/s3/api_op_PutObject.go +++ b/service/s3/api_op_PutObject.go @@ -254,13 +254,11 @@ type PutObjectInput struct { // If x-amz-server-side-encryption is present and has the value of aws:kms, this // header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetrical customer managed customer master key (CMK) - // that was used for the object. If you specify - // x-amz-server-side-encryption:aws:kms, but do not provide + // (Amazon Web Services KMS) symmetrical customer managed key that was used for the + // object. If you specify x-amz-server-side-encryption:aws:kms, but do not provide // x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web - // Services managed CMK in Amazon Web Services to protect the data. If the KMS key - // does not exist in the same account issuing the command, you must use the full - // ARN and not just the ID. + // Services managed key to protect the data. If the KMS key does not exist in the + // same account issuing the command, you must use the full ARN and not just the ID. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 @@ -336,14 +334,14 @@ type PutObjectOutput struct { // If x-amz-server-side-encryption is present and has the value of aws:kms, this // header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) - // that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. SSEKMSKeyId *string // If you specified server-side encryption either with an Amazon Web Services KMS - // customer master key (CMK) or Amazon S3-managed encryption key in your PUT - // request, the response includes this header. It confirms the encryption algorithm - // that Amazon S3 used to encrypt the object. + // key or Amazon S3-managed encryption key in your PUT request, the response + // includes this header. It confirms the encryption algorithm that Amazon S3 used + // to encrypt the object. ServerSideEncryption types.ServerSideEncryption // Version of the object. diff --git a/service/s3/api_op_UploadPart.go b/service/s3/api_op_UploadPart.go index 275f57cb538..374c96d3dd9 100644 --- a/service/s3/api_op_UploadPart.go +++ b/service/s3/api_op_UploadPart.go @@ -231,8 +231,8 @@ type UploadPartOutput struct { SSECustomerKeyMD5 *string // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) - // was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key was used for the + // object. 
SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 diff --git a/service/s3/api_op_UploadPartCopy.go b/service/s3/api_op_UploadPartCopy.go index 2d88c2b8bea..82abba5d2bf 100644 --- a/service/s3/api_op_UploadPartCopy.go +++ b/service/s3/api_op_UploadPartCopy.go @@ -308,8 +308,8 @@ type UploadPartCopyOutput struct { SSECustomerKeyMD5 *string // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) - // that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for the + // object. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 diff --git a/service/s3/api_op_WriteGetObjectResponse.go b/service/s3/api_op_WriteGetObjectResponse.go index 6384a9edcf2..d5a1008ad72 100644 --- a/service/s3/api_op_WriteGetObjectResponse.go +++ b/service/s3/api_op_WriteGetObjectResponse.go @@ -18,8 +18,8 @@ import ( ) // Passes transformed objects to a GetObject operation when using Object Lambda -// Access Points. For information about Object Lambda Access Points, see -// Transforming objects with Object Lambda Access Points +// access points. For information about Object Lambda access points, see +// Transforming objects with Object Lambda access points // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) // in the Amazon S3 User Guide. This operation supports metadata that can be // returned by GetObject @@ -38,7 +38,7 @@ import ( // (PII) and decompress S3 objects. These Lambda functions are available in the // Amazon Web Services Serverless Application Repository, and can be selected // through the Amazon Web Services Management Console when you create your Object -// Lambda Access Point. Example 1: PII Access Control - This Lambda function uses +// Lambda access point. Example 1: PII Access Control - This Lambda function uses // Amazon Comprehend, a natural language processing (NLP) service using machine // learning to find insights and relationships in text. It automatically detects // personally identifiable information (PII) such as names, addresses, dates, @@ -194,8 +194,8 @@ type WriteGetObjectResponseInput struct { SSECustomerKeyMD5 *string // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) - // that was used for stored in Amazon S3 object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // stored in Amazon S3 object. 
SSEKMSKeyId *string // The server-side encryption algorithm used when storing requested object in diff --git a/service/s3/deserializers.go b/service/s3/deserializers.go index f11b0b68dcd..1b853c461cc 100644 --- a/service/s3/deserializers.go +++ b/service/s3/deserializers.go @@ -15961,6 +15961,19 @@ func awsRestxml_deserializeDocumentMetricsAndOperator(v **types.MetricsAndOperat originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { + case strings.EqualFold("AccessPointArn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessPointArn = ptr.String(xtv) + } + case strings.EqualFold("Prefix", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -16139,6 +16152,22 @@ func awsRestxml_deserializeDocumentMetricsFilter(v *types.MetricsFilter, decoder originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { + case strings.EqualFold("AccessPointArn", t.Name.Local): + var mv string + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + mv = xtv + } + uv = &types.MetricsFilterMemberAccessPointArn{Value: mv} + memberFound = true + case strings.EqualFold("And", t.Name.Local): var mv types.MetricsAndOperator nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) diff --git a/service/s3/serializers.go b/service/s3/serializers.go index 6fdae571830..269eae2beb0 100644 --- a/service/s3/serializers.go +++ b/service/s3/serializers.go @@ -10023,6 +10023,17 @@ func awsRestxml_serializeDocumentMetrics(v *types.Metrics, value smithyxml.Value func awsRestxml_serializeDocumentMetricsAndOperator(v *types.MetricsAndOperator, value smithyxml.Value) error { defer value.Close() + if v.AccessPointArn != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessPointArn", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.AccessPointArn) + } if v.Prefix != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ @@ -10082,6 +10093,17 @@ func awsRestxml_serializeDocumentMetricsConfiguration(v *types.MetricsConfigurat func awsRestxml_serializeDocumentMetricsFilter(v types.MetricsFilter, value smithyxml.Value) error { defer value.Close() switch uv := v.(type) { + case *types.MetricsFilterMemberAccessPointArn: + customMemberNameAttr := []smithyxml.Attr{} + customMemberName := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "AccessPointArn", + }, + Attr: customMemberNameAttr, + } + av := value.MemberElement(customMemberName) + av.String(uv.Value) + case *types.MetricsFilterMemberAnd: customMemberNameAttr := []smithyxml.Attr{} customMemberName := smithyxml.StartElement{ diff --git a/service/s3/types/types.go b/service/s3/types/types.go index c06c1dfb1bd..ac6d546bbae 100644 --- a/service/s3/types/types.go +++ b/service/s3/types/types.go @@ -615,9 +615,9 @@ type Encryption struct { KMSContext *string // If the encryption type is aws:kms, this optional value specifies the ID of the - // symmetric customer managed Amazon Web Services KMS CMK to use for encryption of - // job results. Amazon S3 only supports symmetric CMKs. For more information, see - // Using symmetric and asymmetric keys + // symmetric customer managed key to use for encryption of job results. Amazon S3 + // only supports symmetric keys. 
For more information, see Using symmetric and + // asymmetric keys // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Amazon Web Services Key Management Service Developer Guide. KMSKeyId *string @@ -2198,6 +2198,9 @@ type Metrics struct { // all of the predicates in order for the filter to apply. type MetricsAndOperator struct { + // The access point ARN used when evaluating an AND predicate. + AccessPointArn *string + // The prefix used when evaluating an AND predicate. Prefix *string @@ -2211,9 +2214,8 @@ type MetricsAndOperator struct { // by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an // existing metrics configuration, note that this is a full replacement of the // existing metrics configuration. If you don't include the elements you want to -// keep, they are erased. For more information, see PUT Bucket metrics -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) -// in the Amazon S3 API Reference. +// keep, they are erased. For more information, see PutBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html). type MetricsConfiguration struct { // The ID used to identify the metrics configuration. @@ -2222,20 +2224,23 @@ type MetricsConfiguration struct { Id *string // Specifies a metrics configuration filter. The metrics configuration will only - // include objects that meet the filter's criteria. A filter must be a prefix, a - // tag, or a conjunction (MetricsAndOperator). + // include objects that meet the filter's criteria. A filter must be a prefix, an + // object tag, an access point ARN, or a conjunction (MetricsAndOperator). Filter MetricsFilter noSmithyDocumentSerde } // Specifies a metrics configuration filter. The metrics configuration only -// includes objects that meet the filter's criteria. A filter must be a prefix, a -// tag, or a conjunction (MetricsAndOperator). +// includes objects that meet the filter's criteria. A filter must be a prefix, an +// object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more +// information, see PutBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html). // // The following types satisfy this interface: // MetricsFilterMemberPrefix // MetricsFilterMemberTag +// MetricsFilterMemberAccessPointArn // MetricsFilterMemberAnd type MetricsFilter interface { isMetricsFilter() @@ -2259,6 +2264,15 @@ type MetricsFilterMemberTag struct { func (*MetricsFilterMemberTag) isMetricsFilter() {} +// The access point ARN used when evaluating a metrics filter. +type MetricsFilterMemberAccessPointArn struct { + Value string + + noSmithyDocumentSerde +} + +func (*MetricsFilterMemberAccessPointArn) isMetricsFilter() {} + // A conjunction (logical AND) of predicates, which is used in evaluating a metrics // filter. The operator must have at least two predicates, and an object must match // all of the predicates in order for the filter to apply. @@ -2830,8 +2844,8 @@ type ReplicationRule struct { // A container that describes additional filters for identifying the source objects // that you want to replicate. You can choose to enable or disable the replication // of these objects. Currently, Amazon S3 supports only the filter that you can - // specify for objects created with server-side encryption using a customer master - // key (CMK) stored in Amazon Web Services Key Management Service (SSE-KMS). 
+ // specify for objects created with server-side encryption using a customer managed + // key stored in Amazon Web Services Key Management Service (SSE-KMS). SourceSelectionCriteria *SourceSelectionCriteria noSmithyDocumentSerde @@ -3144,8 +3158,8 @@ type ServerSideEncryptionRule struct { // A container that describes additional filters for identifying the source objects // that you want to replicate. You can choose to enable or disable the replication // of these objects. Currently, Amazon S3 supports only the filter that you can -// specify for objects created with server-side encryption using a customer master -// key (CMK) stored in Amazon Web Services Key Management Service (SSE-KMS). +// specify for objects created with server-side encryption using a customer managed +// key stored in Amazon Web Services Key Management Service (SSE-KMS). type SourceSelectionCriteria struct { // A filter that you can specify for selections for modifications on replicas. @@ -3169,8 +3183,8 @@ type SourceSelectionCriteria struct { type SSEKMS struct { // Specifies the ID of the Amazon Web Services Key Management Service (Amazon Web - // Services KMS) symmetric customer managed customer master key (CMK) to use for - // encrypting inventory reports. + // Services KMS) symmetric customer managed key to use for encrypting inventory + // reports. // // This member is required. KeyId *string diff --git a/service/s3/types/types_exported_test.go b/service/s3/types/types_exported_test.go index c5e7ab01d7f..7c0885fd476 100644 --- a/service/s3/types/types_exported_test.go +++ b/service/s3/types/types_exported_test.go @@ -63,6 +63,9 @@ func ExampleMetricsFilter_outputUsage() { var union types.MetricsFilter // type switches can be used to check the union value switch v := union.(type) { + case *types.MetricsFilterMemberAccessPointArn: + _ = v.Value // Value is string + case *types.MetricsFilterMemberAnd: _ = v.Value // Value is types.MetricsAndOperator @@ -81,6 +84,7 @@ func ExampleMetricsFilter_outputUsage() { } } +var _ *string var _ *string var _ *types.Tag var _ *types.MetricsAndOperator diff --git a/service/s3/validators.go b/service/s3/validators.go index b0ab492b016..615f459b511 100644 --- a/service/s3/validators.go +++ b/service/s3/validators.go @@ -4804,14 +4804,14 @@ func validateOpPutBucketAclInput(v *PutBucketAclInput) error { return nil } invalidParams := smithy.InvalidParamsError{Context: "PutBucketAclInput"} - if v.Bucket == nil { - invalidParams.Add(smithy.NewErrParamRequired("Bucket")) - } if v.AccessControlPolicy != nil { if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil { invalidParams.AddNested("AccessControlPolicy", err.(smithy.InvalidParamsError)) } } + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } if invalidParams.Len() > 0 { return invalidParams } else { @@ -5178,17 +5178,17 @@ func validateOpPutObjectAclInput(v *PutObjectAclInput) error { return nil } invalidParams := smithy.InvalidParamsError{Context: "PutObjectAclInput"} + if v.AccessControlPolicy != nil { + if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(smithy.InvalidParamsError)) + } + } if v.Bucket == nil { invalidParams.Add(smithy.NewErrParamRequired("Bucket")) } if v.Key == nil { invalidParams.Add(smithy.NewErrParamRequired("Key")) } - if v.AccessControlPolicy != nil { - if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil { - invalidParams.AddNested("AccessControlPolicy", 
err.(smithy.InvalidParamsError)) - } - } if invalidParams.Len() > 0 { return invalidParams } else { diff --git a/service/sagemaker/api_op_CreateAction.go b/service/sagemaker/api_op_CreateAction.go index e4aa3b3ed85..89e20084cff 100644 --- a/service/sagemaker/api_op_CreateAction.go +++ b/service/sagemaker/api_op_CreateAction.go @@ -16,10 +16,6 @@ import ( // action involves at least one input or output artifact. For more information, see // Amazon SageMaker ML Lineage Tracking // (https://docs.aws.amazon.com/sagemaker/latest/dg/lineage-tracking.html). -// CreateAction can only be invoked from within an SageMaker managed environment. -// This includes SageMaker training jobs, processing jobs, transform jobs, and -// SageMaker notebooks. A call to CreateAction from outside one of these -// environments results in an error. func (c *Client) CreateAction(ctx context.Context, params *CreateActionInput, optFns ...func(*Options)) (*CreateActionOutput, error) { if params == nil { params = &CreateActionInput{} diff --git a/service/sagemaker/api_op_CreateArtifact.go b/service/sagemaker/api_op_CreateArtifact.go index 3b5229fb874..f25b6409be8 100644 --- a/service/sagemaker/api_op_CreateArtifact.go +++ b/service/sagemaker/api_op_CreateArtifact.go @@ -16,10 +16,6 @@ import ( // the ECR registry path of an image. For more information, see Amazon SageMaker ML // Lineage Tracking // (https://docs.aws.amazon.com/sagemaker/latest/dg/lineage-tracking.html). -// CreateArtifact can only be invoked from within an SageMaker managed environment. -// This includes SageMaker training jobs, processing jobs, transform jobs, and -// SageMaker notebooks. A call to CreateArtifact from outside one of these -// environments results in an error. func (c *Client) CreateArtifact(ctx context.Context, params *CreateArtifactInput, optFns ...func(*Options)) (*CreateArtifactOutput, error) { if params == nil { params = &CreateArtifactInput{} diff --git a/service/sagemaker/api_op_CreateContext.go b/service/sagemaker/api_op_CreateContext.go index 825415da916..f0247745c73 100644 --- a/service/sagemaker/api_op_CreateContext.go +++ b/service/sagemaker/api_op_CreateContext.go @@ -16,10 +16,6 @@ import ( // endpoint and a model package. For more information, see Amazon SageMaker ML // Lineage Tracking // (https://docs.aws.amazon.com/sagemaker/latest/dg/lineage-tracking.html). -// CreateContext can only be invoked from within an SageMaker managed environment. -// This includes SageMaker training jobs, processing jobs, transform jobs, and -// SageMaker notebooks. A call to CreateContext from outside one of these -// environments results in an error. func (c *Client) CreateContext(ctx context.Context, params *CreateContextInput, optFns ...func(*Options)) (*CreateContextOutput, error) { if params == nil { params = &CreateContextInput{} diff --git a/service/sagemaker/api_op_CreateDomain.go b/service/sagemaker/api_op_CreateDomain.go index a1750f47120..96e53aed3f5 100644 --- a/service/sagemaker/api_op_CreateDomain.go +++ b/service/sagemaker/api_op_CreateDomain.go @@ -21,9 +21,9 @@ import ( // directory within the EFS volume for notebooks, Git repositories, and data files. // SageMaker uses the Amazon Web Services Key Management Service (Amazon Web // Services KMS) to encrypt the EFS volume attached to the domain with an Amazon -// Web Services managed customer master key (CMK) by default. For more control, you -// can specify a customer managed CMK. 
For more information, see Protect Data at -// Rest Using Encryption +// Web Services managed key by default. For more control, you can specify a +// customer managed key. For more information, see Protect Data at Rest Using +// Encryption // (https://docs.aws.amazon.com/sagemaker/latest/dg/encryption-at-rest.html). VPC // configuration All SageMaker Studio traffic between the domain and the EFS volume // is through the specified VPC and subnets. For other Studio traffic, you can @@ -110,8 +110,8 @@ type CreateDomainInput struct { HomeEfsFileSystemKmsKeyId *string // SageMaker uses Amazon Web Services KMS to encrypt the EFS volume attached to the - // domain with an Amazon Web Services managed customer master key (CMK) by default. - // For more control, specify a customer managed CMK. + // domain with an Amazon Web Services managed key by default. For more control, + // specify a customer managed key. KmsKeyId *string // Tags to associated with the Domain. Each tag consists of a key and an optional diff --git a/service/sagemaker/api_op_CreateEdgePackagingJob.go b/service/sagemaker/api_op_CreateEdgePackagingJob.go index 9f3511d3209..e2e39e5ffdb 100644 --- a/service/sagemaker/api_op_CreateEdgePackagingJob.go +++ b/service/sagemaker/api_op_CreateEdgePackagingJob.go @@ -64,7 +64,8 @@ type CreateEdgePackagingJobInput struct { // This member is required. RoleArn *string - // The CMK to use when encrypting the EBS volume the edge packaging job runs on. + // The Amazon Web Services KMS key to use when encrypting the EBS volume the edge + // packaging job runs on. ResourceKey *string // Creates tags for the packaging job. diff --git a/service/sagemaker/api_op_CreateEndpointConfig.go b/service/sagemaker/api_op_CreateEndpointConfig.go index 0c8bb19fb14..5457f1fe74b 100644 --- a/service/sagemaker/api_op_CreateEndpointConfig.go +++ b/service/sagemaker/api_op_CreateEndpointConfig.go @@ -23,13 +23,9 @@ import ( // you also assign a VariantWeight to specify how much traffic you want to allocate // to each model. For example, suppose that you want to host two models, A and B, // and you assign traffic weight 2 for model A and 1 for model B. Amazon SageMaker -// distributes two-thirds of the traffic to Model A, and one-third to model B. For -// an example that calls this method when deploying a model to Amazon SageMaker -// hosting services, see Deploy the Model to Amazon SageMaker Hosting Services -// (Amazon Web Services SDK for Python (Boto 3)). -// (https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html#ex1-deploy-model-boto) -// When you call CreateEndpoint, a load call is made to DynamoDB to verify that -// your endpoint configuration exists. When you read data from a DynamoDB table +// distributes two-thirds of the traffic to Model A, and one-third to model B. When +// you call CreateEndpoint, a load call is made to DynamoDB to verify that your +// endpoint configuration exists. When you read data from a DynamoDB table // supporting Eventually Consistent Reads // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html), // the response might not reflect the results of a recently completed write diff --git a/service/sagemaker/api_op_CreateStudioLifecycleConfig.go b/service/sagemaker/api_op_CreateStudioLifecycleConfig.go new file mode 100644 index 00000000000..a82f6b0ed8c --- /dev/null +++ b/service/sagemaker/api_op_CreateStudioLifecycleConfig.go @@ -0,0 +1,137 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sagemaker + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/sagemaker/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a new Studio Lifecycle Configuration. +func (c *Client) CreateStudioLifecycleConfig(ctx context.Context, params *CreateStudioLifecycleConfigInput, optFns ...func(*Options)) (*CreateStudioLifecycleConfigOutput, error) { + if params == nil { + params = &CreateStudioLifecycleConfigInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateStudioLifecycleConfig", params, optFns, c.addOperationCreateStudioLifecycleConfigMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateStudioLifecycleConfigOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateStudioLifecycleConfigInput struct { + + // The App type that the Lifecycle Configuration is attached to. + // + // This member is required. + StudioLifecycleConfigAppType types.StudioLifecycleConfigAppType + + // The content of your Studio Lifecycle Configuration script. This content must be + // base64 encoded. + // + // This member is required. + StudioLifecycleConfigContent *string + + // The name of the Studio Lifecycle Configuration to create. + // + // This member is required. + StudioLifecycleConfigName *string + + // Tags to be associated with the Lifecycle Configuration. Each tag consists of a + // key and an optional value. Tag keys must be unique per resource. Tags are + // searchable using the Search API. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateStudioLifecycleConfigOutput struct { + + // The ARN of your created Lifecycle Configuration. + StudioLifecycleConfigArn *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateStudioLifecycleConfigMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateStudioLifecycleConfig{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateStudioLifecycleConfig{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateStudioLifecycleConfigValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateStudioLifecycleConfig(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateStudioLifecycleConfig(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sagemaker", + OperationName: "CreateStudioLifecycleConfig", + } +} diff --git a/service/sagemaker/api_op_DeleteStudioLifecycleConfig.go b/service/sagemaker/api_op_DeleteStudioLifecycleConfig.go new file mode 100644 index 00000000000..08dd28198fd --- /dev/null +++ b/service/sagemaker/api_op_DeleteStudioLifecycleConfig.go @@ -0,0 +1,119 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sagemaker + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the Studio Lifecycle Configuration. In order to delete the Lifecycle +// Configuration, there must be no running apps using the Lifecycle Configuration. +// You must also remove the Lifecycle Configuration from UserSettings in all +// Domains and UserProfiles. 
+func (c *Client) DeleteStudioLifecycleConfig(ctx context.Context, params *DeleteStudioLifecycleConfigInput, optFns ...func(*Options)) (*DeleteStudioLifecycleConfigOutput, error) { + if params == nil { + params = &DeleteStudioLifecycleConfigInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteStudioLifecycleConfig", params, optFns, c.addOperationDeleteStudioLifecycleConfigMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteStudioLifecycleConfigOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteStudioLifecycleConfigInput struct { + + // The name of the Studio Lifecycle Configuration to delete. + // + // This member is required. + StudioLifecycleConfigName *string + + noSmithyDocumentSerde +} + +type DeleteStudioLifecycleConfigOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteStudioLifecycleConfigMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteStudioLifecycleConfig{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteStudioLifecycleConfig{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteStudioLifecycleConfigValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteStudioLifecycleConfig(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteStudioLifecycleConfig(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sagemaker", + OperationName: "DeleteStudioLifecycleConfig", + } +} diff --git a/service/sagemaker/api_op_DescribeAction.go b/service/sagemaker/api_op_DescribeAction.go index 955dcfd4946..7c4beb6c0f5 100644 --- a/service/sagemaker/api_op_DescribeAction.go +++ b/service/sagemaker/api_op_DescribeAction.go @@ -49,8 +49,8 @@ type DescribeActionOutput struct { // The type of the action. 
ActionType *string - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. CreatedBy *types.UserContext // When the action was created. @@ -59,8 +59,8 @@ type DescribeActionOutput struct { // The description of the action. Description *string - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. LastModifiedBy *types.UserContext // When the action was last modified. diff --git a/service/sagemaker/api_op_DescribeArtifact.go b/service/sagemaker/api_op_DescribeArtifact.go index bed774c2530..63c0cbde6d5 100644 --- a/service/sagemaker/api_op_DescribeArtifact.go +++ b/service/sagemaker/api_op_DescribeArtifact.go @@ -49,15 +49,15 @@ type DescribeArtifactOutput struct { // The type of the artifact. ArtifactType *string - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. CreatedBy *types.UserContext // When the artifact was created. CreationTime *time.Time - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. LastModifiedBy *types.UserContext // When the artifact was last modified. diff --git a/service/sagemaker/api_op_DescribeContext.go b/service/sagemaker/api_op_DescribeContext.go index 395b9618194..15cf14fe27a 100644 --- a/service/sagemaker/api_op_DescribeContext.go +++ b/service/sagemaker/api_op_DescribeContext.go @@ -49,8 +49,8 @@ type DescribeContextOutput struct { // The type of the context. ContextType *string - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. CreatedBy *types.UserContext // When the context was created. @@ -59,8 +59,8 @@ type DescribeContextOutput struct { // The description of the context. Description *string - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. LastModifiedBy *types.UserContext // When the context was last modified. diff --git a/service/sagemaker/api_op_DescribeDomain.go b/service/sagemaker/api_op_DescribeDomain.go index 4731a97a57c..24614e2e757 100644 --- a/service/sagemaker/api_op_DescribeDomain.go +++ b/service/sagemaker/api_op_DescribeDomain.go @@ -80,7 +80,7 @@ type DescribeDomainOutput struct { // Deprecated: This property is deprecated, use KmsKeyId instead. HomeEfsFileSystemKmsKeyId *string - // The Amazon Web Services KMS customer managed CMK used to encrypt the EFS volume + // The Amazon Web Services KMS customer managed key used to encrypt the EFS volume // attached to the domain. 
KmsKeyId *string diff --git a/service/sagemaker/api_op_DescribeEdgePackagingJob.go b/service/sagemaker/api_op_DescribeEdgePackagingJob.go index 9913ddaeb9c..a3751083c2e 100644 --- a/service/sagemaker/api_op_DescribeEdgePackagingJob.go +++ b/service/sagemaker/api_op_DescribeEdgePackagingJob.go @@ -86,7 +86,8 @@ type DescribeEdgePackagingJobOutput struct { // The output of a SageMaker Edge Manager deployable resource. PresetDeploymentOutput *types.EdgePresetDeploymentOutput - // The CMK to use when encrypting the EBS volume the job run on. + // The Amazon Web Services KMS key to use when encrypting the EBS volume the job + // run on. ResourceKey *string // The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to diff --git a/service/sagemaker/api_op_DescribeModelPackage.go b/service/sagemaker/api_op_DescribeModelPackage.go index 0cf84e4d66f..6f5e6561e6b 100644 --- a/service/sagemaker/api_op_DescribeModelPackage.go +++ b/service/sagemaker/api_op_DescribeModelPackage.go @@ -77,16 +77,16 @@ type DescribeModelPackageOutput struct { // Marketplace. CertifyForMarketplace bool - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. CreatedBy *types.UserContext // Details about inference jobs that can be run with models based on this model // package. InferenceSpecification *types.InferenceSpecification - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. LastModifiedBy *types.UserContext // The last time the model package was modified. diff --git a/service/sagemaker/api_op_DescribeModelPackageGroup.go b/service/sagemaker/api_op_DescribeModelPackageGroup.go index 461d89468ba..1c572045c98 100644 --- a/service/sagemaker/api_op_DescribeModelPackageGroup.go +++ b/service/sagemaker/api_op_DescribeModelPackageGroup.go @@ -40,8 +40,8 @@ type DescribeModelPackageGroupInput struct { type DescribeModelPackageGroupOutput struct { - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. // // This member is required. CreatedBy *types.UserContext diff --git a/service/sagemaker/api_op_DescribePipeline.go b/service/sagemaker/api_op_DescribePipeline.go index ea82c5cab92..fe269805f5f 100644 --- a/service/sagemaker/api_op_DescribePipeline.go +++ b/service/sagemaker/api_op_DescribePipeline.go @@ -40,15 +40,15 @@ type DescribePipelineInput struct { type DescribePipelineOutput struct { - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. CreatedBy *types.UserContext // The time when the pipeline was created. CreationTime *time.Time - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. LastModifiedBy *types.UserContext // The time when the pipeline was last modified. 
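The new CreateStudioLifecycleConfig operation added earlier in this diff requires a name, an app type, and a script body that must be base64 encoded. A minimal sketch of calling it is below; the configuration name, script contents, and the "JupyterServer" app type value are assumptions for illustration, not taken from this change.

```go
package main

import (
	"context"
	"encoding/base64"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := sagemaker.NewFromConfig(cfg)

	// StudioLifecycleConfigContent must be base64 encoded, per the input documentation.
	script := base64.StdEncoding.EncodeToString([]byte("#!/bin/bash\necho hello\n"))

	out, err := client.CreateStudioLifecycleConfig(context.TODO(), &sagemaker.CreateStudioLifecycleConfigInput{
		StudioLifecycleConfigName:    aws.String("example-config"), // placeholder name
		StudioLifecycleConfigContent: aws.String(script),
		// App type value assumed; check the StudioLifecycleConfigAppType enum for valid values.
		StudioLifecycleConfigAppType: types.StudioLifecycleConfigAppType("JupyterServer"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created:", aws.ToString(out.StudioLifecycleConfigArn))
}
```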
diff --git a/service/sagemaker/api_op_DescribePipelineExecution.go b/service/sagemaker/api_op_DescribePipelineExecution.go index ef3c4e01610..d413a989303 100644 --- a/service/sagemaker/api_op_DescribePipelineExecution.go +++ b/service/sagemaker/api_op_DescribePipelineExecution.go @@ -40,8 +40,8 @@ type DescribePipelineExecutionInput struct { type DescribePipelineExecutionOutput struct { - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. CreatedBy *types.UserContext // The time when the pipeline execution was created. @@ -50,8 +50,8 @@ type DescribePipelineExecutionOutput struct { // If the execution failed, a message describing why. FailureReason *string - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. LastModifiedBy *types.UserContext // The time when the pipeline execution was modified last. diff --git a/service/sagemaker/api_op_DescribeProject.go b/service/sagemaker/api_op_DescribeProject.go index 1b1e530a6ff..15fe49f521a 100644 --- a/service/sagemaker/api_op_DescribeProject.go +++ b/service/sagemaker/api_op_DescribeProject.go @@ -72,8 +72,8 @@ type DescribeProjectOutput struct { // This member is required. ServiceCatalogProvisioningDetails *types.ServiceCatalogProvisioningDetails - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. CreatedBy *types.UserContext // The description of the project. diff --git a/service/sagemaker/api_op_DescribeStudioLifecycleConfig.go b/service/sagemaker/api_op_DescribeStudioLifecycleConfig.go new file mode 100644 index 00000000000..7e7a1706ae0 --- /dev/null +++ b/service/sagemaker/api_op_DescribeStudioLifecycleConfig.go @@ -0,0 +1,138 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sagemaker + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/sagemaker/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Describes the Studio Lifecycle Configuration. +func (c *Client) DescribeStudioLifecycleConfig(ctx context.Context, params *DescribeStudioLifecycleConfigInput, optFns ...func(*Options)) (*DescribeStudioLifecycleConfigOutput, error) { + if params == nil { + params = &DescribeStudioLifecycleConfigInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeStudioLifecycleConfig", params, optFns, c.addOperationDescribeStudioLifecycleConfigMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeStudioLifecycleConfigOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeStudioLifecycleConfigInput struct { + + // The name of the Studio Lifecycle Configuration to describe. + // + // This member is required. + StudioLifecycleConfigName *string + + noSmithyDocumentSerde +} + +type DescribeStudioLifecycleConfigOutput struct { + + // The creation time of the Studio Lifecycle Configuration. + CreationTime *time.Time + + // This value is equivalent to CreationTime because Studio Lifecycle Configurations + // are immutable. 
+ LastModifiedTime *time.Time + + // The App type that the Lifecycle Configuration is attached to. + StudioLifecycleConfigAppType types.StudioLifecycleConfigAppType + + // The ARN of the Lifecycle Configuration to describe. + StudioLifecycleConfigArn *string + + // The content of your Studio Lifecycle Configuration script. + StudioLifecycleConfigContent *string + + // The name of the Studio Lifecycle Configuration that is described. + StudioLifecycleConfigName *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeStudioLifecycleConfigMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeStudioLifecycleConfig{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeStudioLifecycleConfig{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeStudioLifecycleConfigValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeStudioLifecycleConfig(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeStudioLifecycleConfig(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sagemaker", + OperationName: "DescribeStudioLifecycleConfig", + } +} diff --git a/service/sagemaker/api_op_DescribeTrialComponent.go b/service/sagemaker/api_op_DescribeTrialComponent.go index 63ac05ed0fb..a3132c5ed0c 100644 --- a/service/sagemaker/api_op_DescribeTrialComponent.go +++ b/service/sagemaker/api_op_DescribeTrialComponent.go @@ -40,7 +40,7 @@ type DescribeTrialComponentInput struct { type DescribeTrialComponentOutput struct { - // Who created the component. + // Who created the trial component. CreatedBy *types.UserContext // When the component was created. 
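The new DescribeStudioLifecycleConfig operation added above follows the usual smithy-go call pattern (invokeOperation plus the standard middleware stack). The sketch below shows how a caller might fetch a Lifecycle Configuration by name and read the new output fields; it is illustrative only, not part of the generated code, and the configuration name "my-lcc" is a placeholder assumption.

// Illustrative caller for the new DescribeStudioLifecycleConfig operation
// (not part of the generated code).
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sagemaker.NewFromConfig(cfg)

	out, err := client.DescribeStudioLifecycleConfig(ctx, &sagemaker.DescribeStudioLifecycleConfigInput{
		StudioLifecycleConfigName: aws.String("my-lcc"), // placeholder name, an assumption for this sketch
	})
	if err != nil {
		log.Fatal(err)
	}

	// The App type the configuration is attached to and the script content,
	// as documented in the new output shape above.
	fmt.Println("app type:", out.StudioLifecycleConfigAppType)
	if out.StudioLifecycleConfigContent != nil {
		fmt.Println("content:", *out.StudioLifecycleConfigContent)
	}
}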
diff --git a/service/sagemaker/api_op_ListModels.go b/service/sagemaker/api_op_ListModels.go index 06d3d7ae990..84be0370c8e 100644 --- a/service/sagemaker/api_op_ListModels.go +++ b/service/sagemaker/api_op_ListModels.go @@ -41,8 +41,8 @@ type ListModelsInput struct { // The maximum number of models to return in the response. MaxResults *int32 - // A string in the training job name. This filter returns only models in the - // training job whose name contains the specified string. + // A string in the model name. This filter returns only models whose name contains + // the specified string. NameContains *string // If the response to a previous ListModels request was truncated, the response diff --git a/service/sagemaker/api_op_ListStudioLifecycleConfigs.go b/service/sagemaker/api_op_ListStudioLifecycleConfigs.go new file mode 100644 index 00000000000..e80ef5dde2e --- /dev/null +++ b/service/sagemaker/api_op_ListStudioLifecycleConfigs.go @@ -0,0 +1,247 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sagemaker + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/sagemaker/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Lists the Studio Lifecycle Configurations in your Amazon Web Services Account. +func (c *Client) ListStudioLifecycleConfigs(ctx context.Context, params *ListStudioLifecycleConfigsInput, optFns ...func(*Options)) (*ListStudioLifecycleConfigsOutput, error) { + if params == nil { + params = &ListStudioLifecycleConfigsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListStudioLifecycleConfigs", params, optFns, c.addOperationListStudioLifecycleConfigsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListStudioLifecycleConfigsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListStudioLifecycleConfigsInput struct { + + // A parameter to search for the App Type to which the Lifecycle Configuration is + // attached. + AppTypeEquals types.StudioLifecycleConfigAppType + + // A filter that returns only Lifecycle Configurations created on or after the + // specified time. + CreationTimeAfter *time.Time + + // A filter that returns only Lifecycle Configurations created on or before the + // specified time. + CreationTimeBefore *time.Time + + // The maximum number of Studio Lifecycle Configurations to return in the response. + // The default value is 10. + MaxResults *int32 + + // A filter that returns only Lifecycle Configurations modified after the specified + // time. + ModifiedTimeAfter *time.Time + + // A filter that returns only Lifecycle Configurations modified before the + // specified time. + ModifiedTimeBefore *time.Time + + // A string in the Lifecycle Configuration name. This filter returns only Lifecycle + // Configurations whose name contains the specified string. + NameContains *string + + // If the previous call to ListStudioLifecycleConfigs didn't return the full set of + // Lifecycle Configurations, the call returns a token for getting the next set of + // Lifecycle Configurations. + NextToken *string + + // The property used to sort results. The default value is CreationTime. + SortBy types.StudioLifecycleConfigSortKey + + // The sort order. The default value is Descending. 
+ SortOrder types.SortOrder + + noSmithyDocumentSerde +} + +type ListStudioLifecycleConfigsOutput struct { + + // A token for getting the next set of actions, if there are any. + NextToken *string + + // A list of Lifecycle Configurations and their properties. + StudioLifecycleConfigs []types.StudioLifecycleConfigDetails + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListStudioLifecycleConfigsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListStudioLifecycleConfigs{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListStudioLifecycleConfigs{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListStudioLifecycleConfigs(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListStudioLifecycleConfigsAPIClient is a client that implements the +// ListStudioLifecycleConfigs operation. +type ListStudioLifecycleConfigsAPIClient interface { + ListStudioLifecycleConfigs(context.Context, *ListStudioLifecycleConfigsInput, ...func(*Options)) (*ListStudioLifecycleConfigsOutput, error) +} + +var _ ListStudioLifecycleConfigsAPIClient = (*Client)(nil) + +// ListStudioLifecycleConfigsPaginatorOptions is the paginator options for +// ListStudioLifecycleConfigs +type ListStudioLifecycleConfigsPaginatorOptions struct { + // The maximum number of Studio Lifecycle Configurations to return in the response. + // The default value is 10. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListStudioLifecycleConfigsPaginator is a paginator for +// ListStudioLifecycleConfigs +type ListStudioLifecycleConfigsPaginator struct { + options ListStudioLifecycleConfigsPaginatorOptions + client ListStudioLifecycleConfigsAPIClient + params *ListStudioLifecycleConfigsInput + nextToken *string + firstPage bool +} + +// NewListStudioLifecycleConfigsPaginator returns a new +// ListStudioLifecycleConfigsPaginator +func NewListStudioLifecycleConfigsPaginator(client ListStudioLifecycleConfigsAPIClient, params *ListStudioLifecycleConfigsInput, optFns ...func(*ListStudioLifecycleConfigsPaginatorOptions)) *ListStudioLifecycleConfigsPaginator { + if params == nil { + params = &ListStudioLifecycleConfigsInput{} + } + + options := ListStudioLifecycleConfigsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListStudioLifecycleConfigsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListStudioLifecycleConfigsPaginator) HasMorePages() bool { + return p.firstPage || p.nextToken != nil +} + +// NextPage retrieves the next ListStudioLifecycleConfigs page. +func (p *ListStudioLifecycleConfigsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListStudioLifecycleConfigsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListStudioLifecycleConfigs(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListStudioLifecycleConfigs(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sagemaker", + OperationName: "ListStudioLifecycleConfigs", + } +} diff --git a/service/sagemaker/api_op_RetryPipelineExecution.go b/service/sagemaker/api_op_RetryPipelineExecution.go new file mode 100644 index 00000000000..9c530202fa8 --- /dev/null +++ b/service/sagemaker/api_op_RetryPipelineExecution.go @@ -0,0 +1,163 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sagemaker + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retry the execution of the pipeline. 
+func (c *Client) RetryPipelineExecution(ctx context.Context, params *RetryPipelineExecutionInput, optFns ...func(*Options)) (*RetryPipelineExecutionOutput, error) { + if params == nil { + params = &RetryPipelineExecutionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RetryPipelineExecution", params, optFns, c.addOperationRetryPipelineExecutionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RetryPipelineExecutionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RetryPipelineExecutionInput struct { + + // A unique, case-sensitive identifier that you provide to ensure the idempotency + // of the operation. An idempotent operation completes no more than once. + // + // This member is required. + ClientRequestToken *string + + // The Amazon Resource Name (ARN) of the pipeline execution. + // + // This member is required. + PipelineExecutionArn *string + + noSmithyDocumentSerde +} + +type RetryPipelineExecutionOutput struct { + + // The Amazon Resource Name (ARN) of the pipeline execution. + PipelineExecutionArn *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRetryPipelineExecutionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpRetryPipelineExecution{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpRetryPipelineExecution{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addIdempotencyToken_opRetryPipelineExecutionMiddleware(stack, options); err != nil { + return err + } + if err = addOpRetryPipelineExecutionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRetryPipelineExecution(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpRetryPipelineExecution struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpRetryPipelineExecution) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m 
*idempotencyToken_initializeOpRetryPipelineExecution) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*RetryPipelineExecutionInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *RetryPipelineExecutionInput ") + } + + if input.ClientRequestToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientRequestToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opRetryPipelineExecutionMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpRetryPipelineExecution{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opRetryPipelineExecution(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sagemaker", + OperationName: "RetryPipelineExecution", + } +} diff --git a/service/sagemaker/api_op_StartPipelineExecution.go b/service/sagemaker/api_op_StartPipelineExecution.go index 9714a123dab..f94ba00e801 100644 --- a/service/sagemaker/api_op_StartPipelineExecution.go +++ b/service/sagemaker/api_op_StartPipelineExecution.go @@ -31,7 +31,7 @@ func (c *Client) StartPipelineExecution(ctx context.Context, params *StartPipeli type StartPipelineExecutionInput struct { // A unique, case-sensitive identifier that you provide to ensure the idempotency - // of the operation. An idempotent operation completes no more than one time. + // of the operation. An idempotent operation completes no more than once. // // This member is required. ClientRequestToken *string diff --git a/service/sagemaker/api_op_StopPipelineExecution.go b/service/sagemaker/api_op_StopPipelineExecution.go index af49a982418..05884d2557b 100644 --- a/service/sagemaker/api_op_StopPipelineExecution.go +++ b/service/sagemaker/api_op_StopPipelineExecution.go @@ -45,7 +45,7 @@ func (c *Client) StopPipelineExecution(ctx context.Context, params *StopPipeline type StopPipelineExecutionInput struct { // A unique, case-sensitive identifier that you provide to ensure the idempotency - // of the operation. An idempotent operation completes no more than one time. + // of the operation. An idempotent operation completes no more than once. // // This member is required. 
ClientRequestToken *string diff --git a/service/sagemaker/deserializers.go b/service/sagemaker/deserializers.go index af07261e871..8f7bd42608c 100644 --- a/service/sagemaker/deserializers.go +++ b/service/sagemaker/deserializers.go @@ -4477,6 +4477,117 @@ func awsAwsjson11_deserializeOpErrorCreateProject(response *smithyhttp.Response, } } +type awsAwsjson11_deserializeOpCreateStudioLifecycleConfig struct { +} + +func (*awsAwsjson11_deserializeOpCreateStudioLifecycleConfig) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateStudioLifecycleConfig) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateStudioLifecycleConfig(response, &metadata) + } + output := &CreateStudioLifecycleConfigOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateStudioLifecycleConfigOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateStudioLifecycleConfig(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ResourceInUse", errorCode): + return awsAwsjson11_deserializeErrorResourceInUse(response, errorBody) + + default: + 
genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsAwsjson11_deserializeOpCreateTrainingJob struct { } @@ -8242,14 +8353,14 @@ func awsAwsjson11_deserializeOpErrorDeleteProject(response *smithyhttp.Response, } } -type awsAwsjson11_deserializeOpDeleteTags struct { +type awsAwsjson11_deserializeOpDeleteStudioLifecycleConfig struct { } -func (*awsAwsjson11_deserializeOpDeleteTags) ID() string { +func (*awsAwsjson11_deserializeOpDeleteStudioLifecycleConfig) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDeleteTags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDeleteStudioLifecycleConfig) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8263,43 +8374,21 @@ func (m *awsAwsjson11_deserializeOpDeleteTags) HandleDeserialize(ctx context.Con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDeleteTags(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteStudioLifecycleConfig(response, &metadata) } - output := &DeleteTagsOutput{} + output := &DeleteStudioLifecycleConfigOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDeleteTagsOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDeleteTags(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDeleteStudioLifecycleConfig(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8340,6 +8429,12 @@ func awsAwsjson11_deserializeOpErrorDeleteTags(response *smithyhttp.Response, me } switch { + case strings.EqualFold("ResourceInUse", errorCode): + return awsAwsjson11_deserializeErrorResourceInUse(response, errorBody) + + case strings.EqualFold("ResourceNotFound", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -8350,14 +8445,14 @@ func awsAwsjson11_deserializeOpErrorDeleteTags(response *smithyhttp.Response, me } } -type 
awsAwsjson11_deserializeOpDeleteTrial struct { +type awsAwsjson11_deserializeOpDeleteTags struct { } -func (*awsAwsjson11_deserializeOpDeleteTrial) ID() string { +func (*awsAwsjson11_deserializeOpDeleteTags) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDeleteTrial) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDeleteTags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8371,9 +8466,9 @@ func (m *awsAwsjson11_deserializeOpDeleteTrial) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDeleteTrial(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteTags(response, &metadata) } - output := &DeleteTrialOutput{} + output := &DeleteTagsOutput{} out.Result = output var buff [1024]byte @@ -8393,7 +8488,7 @@ func (m *awsAwsjson11_deserializeOpDeleteTrial) HandleDeserialize(ctx context.Co return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDeleteTrialOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDeleteTagsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8407,7 +8502,7 @@ func (m *awsAwsjson11_deserializeOpDeleteTrial) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDeleteTrial(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDeleteTags(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8448,9 +8543,6 @@ func awsAwsjson11_deserializeOpErrorDeleteTrial(response *smithyhttp.Response, m } switch { - case strings.EqualFold("ResourceNotFound", errorCode): - return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) - default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -8461,14 +8553,14 @@ func awsAwsjson11_deserializeOpErrorDeleteTrial(response *smithyhttp.Response, m } } -type awsAwsjson11_deserializeOpDeleteTrialComponent struct { +type awsAwsjson11_deserializeOpDeleteTrial struct { } -func (*awsAwsjson11_deserializeOpDeleteTrialComponent) ID() string { +func (*awsAwsjson11_deserializeOpDeleteTrial) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDeleteTrialComponent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDeleteTrial) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8482,9 +8574,9 @@ func (m *awsAwsjson11_deserializeOpDeleteTrialComponent) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDeleteTrialComponent(response, &metadata) + return out, metadata, 
awsAwsjson11_deserializeOpErrorDeleteTrial(response, &metadata) } - output := &DeleteTrialComponentOutput{} + output := &DeleteTrialOutput{} out.Result = output var buff [1024]byte @@ -8504,7 +8596,7 @@ func (m *awsAwsjson11_deserializeOpDeleteTrialComponent) HandleDeserialize(ctx c return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDeleteTrialComponentOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDeleteTrialOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8518,7 +8610,7 @@ func (m *awsAwsjson11_deserializeOpDeleteTrialComponent) HandleDeserialize(ctx c return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDeleteTrialComponent(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDeleteTrial(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8572,14 +8664,14 @@ func awsAwsjson11_deserializeOpErrorDeleteTrialComponent(response *smithyhttp.Re } } -type awsAwsjson11_deserializeOpDeleteUserProfile struct { +type awsAwsjson11_deserializeOpDeleteTrialComponent struct { } -func (*awsAwsjson11_deserializeOpDeleteUserProfile) ID() string { +func (*awsAwsjson11_deserializeOpDeleteTrialComponent) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDeleteUserProfile) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDeleteTrialComponent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8593,21 +8685,43 @@ func (m *awsAwsjson11_deserializeOpDeleteUserProfile) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDeleteUserProfile(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteTrialComponent(response, &metadata) } - output := &DeleteUserProfileOutput{} + output := &DeleteTrialComponentOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteTrialComponentOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDeleteUserProfile(response 
*smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDeleteTrialComponent(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8648,9 +8762,6 @@ func awsAwsjson11_deserializeOpErrorDeleteUserProfile(response *smithyhttp.Respo } switch { - case strings.EqualFold("ResourceInUse", errorCode): - return awsAwsjson11_deserializeErrorResourceInUse(response, errorBody) - case strings.EqualFold("ResourceNotFound", errorCode): return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) @@ -8664,14 +8775,14 @@ func awsAwsjson11_deserializeOpErrorDeleteUserProfile(response *smithyhttp.Respo } } -type awsAwsjson11_deserializeOpDeleteWorkforce struct { +type awsAwsjson11_deserializeOpDeleteUserProfile struct { } -func (*awsAwsjson11_deserializeOpDeleteWorkforce) ID() string { +func (*awsAwsjson11_deserializeOpDeleteUserProfile) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDeleteWorkforce) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDeleteUserProfile) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8685,43 +8796,21 @@ func (m *awsAwsjson11_deserializeOpDeleteWorkforce) HandleDeserialize(ctx contex } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDeleteWorkforce(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteUserProfile(response, &metadata) } - output := &DeleteWorkforceOutput{} + output := &DeleteUserProfileOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDeleteWorkforceOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDeleteWorkforce(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDeleteUserProfile(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8762,6 +8851,12 @@ 
func awsAwsjson11_deserializeOpErrorDeleteWorkforce(response *smithyhttp.Respons } switch { + case strings.EqualFold("ResourceInUse", errorCode): + return awsAwsjson11_deserializeErrorResourceInUse(response, errorBody) + + case strings.EqualFold("ResourceNotFound", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -8772,14 +8867,14 @@ func awsAwsjson11_deserializeOpErrorDeleteWorkforce(response *smithyhttp.Respons } } -type awsAwsjson11_deserializeOpDeleteWorkteam struct { +type awsAwsjson11_deserializeOpDeleteWorkforce struct { } -func (*awsAwsjson11_deserializeOpDeleteWorkteam) ID() string { +func (*awsAwsjson11_deserializeOpDeleteWorkforce) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDeleteWorkteam) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDeleteWorkforce) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8793,9 +8888,9 @@ func (m *awsAwsjson11_deserializeOpDeleteWorkteam) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDeleteWorkteam(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteWorkforce(response, &metadata) } - output := &DeleteWorkteamOutput{} + output := &DeleteWorkforceOutput{} out.Result = output var buff [1024]byte @@ -8815,7 +8910,7 @@ func (m *awsAwsjson11_deserializeOpDeleteWorkteam) HandleDeserialize(ctx context return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDeleteWorkteamOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDeleteWorkforceOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8829,96 +8924,7 @@ func (m *awsAwsjson11_deserializeOpDeleteWorkteam) HandleDeserialize(ctx context return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDeleteWorkteam(response *smithyhttp.Response, metadata *middleware.Metadata) error { - var errorBuffer bytes.Buffer - if _, err := io.Copy(&errorBuffer, response.Body); err != nil { - return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} - } - errorBody := bytes.NewReader(errorBuffer.Bytes()) - - errorCode := "UnknownError" - errorMessage := errorCode - - code := response.Header.Get("X-Amzn-ErrorType") - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) - } - - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - code, message, err := restjson.GetErrorInfo(decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } - - errorBody.Seek(0, io.SeekStart) - if len(code) != 0 { - errorCode = restjson.SanitizeErrorCode(code) - } - if len(message) != 0 { - errorMessage = message - } - - switch { - case strings.EqualFold("ResourceLimitExceeded", errorCode): - return awsAwsjson11_deserializeErrorResourceLimitExceeded(response, 
errorBody) - - default: - genericError := &smithy.GenericAPIError{ - Code: errorCode, - Message: errorMessage, - } - return genericError - - } -} - -type awsAwsjson11_deserializeOpDeregisterDevices struct { -} - -func (*awsAwsjson11_deserializeOpDeregisterDevices) ID() string { - return "OperationDeserializer" -} - -func (m *awsAwsjson11_deserializeOpDeregisterDevices) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( - out middleware.DeserializeOutput, metadata middleware.Metadata, err error, -) { - out, metadata, err = next.HandleDeserialize(ctx, in) - if err != nil { - return out, metadata, err - } - - response, ok := out.RawResponse.(*smithyhttp.Response) - if !ok { - return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} - } - - if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDeregisterDevices(response, &metadata) - } - output := &DeregisterDevicesOutput{} - out.Result = output - - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } - } - - return out, metadata, err -} - -func awsAwsjson11_deserializeOpErrorDeregisterDevices(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDeleteWorkforce(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8969,14 +8975,14 @@ func awsAwsjson11_deserializeOpErrorDeregisterDevices(response *smithyhttp.Respo } } -type awsAwsjson11_deserializeOpDescribeAction struct { +type awsAwsjson11_deserializeOpDeleteWorkteam struct { } -func (*awsAwsjson11_deserializeOpDescribeAction) ID() string { +func (*awsAwsjson11_deserializeOpDeleteWorkteam) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeAction) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDeleteWorkteam) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8990,9 +8996,9 @@ func (m *awsAwsjson11_deserializeOpDescribeAction) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeAction(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteWorkteam(response, &metadata) } - output := &DescribeActionOutput{} + output := &DeleteWorkteamOutput{} out.Result = output var buff [1024]byte @@ -9012,7 +9018,7 @@ func (m *awsAwsjson11_deserializeOpDescribeAction) HandleDeserialize(ctx context return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribeActionOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDeleteWorkteamOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9026,7 +9032,7 @@ func (m *awsAwsjson11_deserializeOpDescribeAction) HandleDeserialize(ctx context return out, metadata, err } -func 
awsAwsjson11_deserializeOpErrorDescribeAction(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDeleteWorkteam(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9067,8 +9073,8 @@ func awsAwsjson11_deserializeOpErrorDescribeAction(response *smithyhttp.Response } switch { - case strings.EqualFold("ResourceNotFound", errorCode): - return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) + case strings.EqualFold("ResourceLimitExceeded", errorCode): + return awsAwsjson11_deserializeErrorResourceLimitExceeded(response, errorBody) default: genericError := &smithy.GenericAPIError{ @@ -9080,14 +9086,14 @@ func awsAwsjson11_deserializeOpErrorDescribeAction(response *smithyhttp.Response } } -type awsAwsjson11_deserializeOpDescribeAlgorithm struct { +type awsAwsjson11_deserializeOpDeregisterDevices struct { } -func (*awsAwsjson11_deserializeOpDescribeAlgorithm) ID() string { +func (*awsAwsjson11_deserializeOpDeregisterDevices) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeAlgorithm) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDeregisterDevices) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9101,43 +9107,21 @@ func (m *awsAwsjson11_deserializeOpDescribeAlgorithm) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeAlgorithm(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDeregisterDevices(response, &metadata) } - output := &DescribeAlgorithmOutput{} + output := &DeregisterDevicesOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentDescribeAlgorithmOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribeAlgorithm(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDeregisterDevices(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != 
nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9188,14 +9172,14 @@ func awsAwsjson11_deserializeOpErrorDescribeAlgorithm(response *smithyhttp.Respo } } -type awsAwsjson11_deserializeOpDescribeApp struct { +type awsAwsjson11_deserializeOpDescribeAction struct { } -func (*awsAwsjson11_deserializeOpDescribeApp) ID() string { +func (*awsAwsjson11_deserializeOpDescribeAction) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeApp) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeAction) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9209,9 +9193,9 @@ func (m *awsAwsjson11_deserializeOpDescribeApp) HandleDeserialize(ctx context.Co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeApp(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeAction(response, &metadata) } - output := &DescribeAppOutput{} + output := &DescribeActionOutput{} out.Result = output var buff [1024]byte @@ -9231,7 +9215,7 @@ func (m *awsAwsjson11_deserializeOpDescribeApp) HandleDeserialize(ctx context.Co return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribeAppOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeActionOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9245,7 +9229,7 @@ func (m *awsAwsjson11_deserializeOpDescribeApp) HandleDeserialize(ctx context.Co return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribeApp(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribeAction(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9299,14 +9283,14 @@ func awsAwsjson11_deserializeOpErrorDescribeApp(response *smithyhttp.Response, m } } -type awsAwsjson11_deserializeOpDescribeAppImageConfig struct { +type awsAwsjson11_deserializeOpDescribeAlgorithm struct { } -func (*awsAwsjson11_deserializeOpDescribeAppImageConfig) ID() string { +func (*awsAwsjson11_deserializeOpDescribeAlgorithm) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeAppImageConfig) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeAlgorithm) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9320,9 +9304,9 @@ func (m *awsAwsjson11_deserializeOpDescribeAppImageConfig) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeAppImageConfig(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeAlgorithm(response, &metadata) } - 
output := &DescribeAppImageConfigOutput{} + output := &DescribeAlgorithmOutput{} out.Result = output var buff [1024]byte @@ -9342,7 +9326,7 @@ func (m *awsAwsjson11_deserializeOpDescribeAppImageConfig) HandleDeserialize(ctx return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribeAppImageConfigOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeAlgorithmOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9356,7 +9340,7 @@ func (m *awsAwsjson11_deserializeOpDescribeAppImageConfig) HandleDeserialize(ctx return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribeAppImageConfig(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribeAlgorithm(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9397,9 +9381,6 @@ func awsAwsjson11_deserializeOpErrorDescribeAppImageConfig(response *smithyhttp. } switch { - case strings.EqualFold("ResourceNotFound", errorCode): - return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) - default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -9410,14 +9391,14 @@ func awsAwsjson11_deserializeOpErrorDescribeAppImageConfig(response *smithyhttp. } } -type awsAwsjson11_deserializeOpDescribeArtifact struct { +type awsAwsjson11_deserializeOpDescribeApp struct { } -func (*awsAwsjson11_deserializeOpDescribeArtifact) ID() string { +func (*awsAwsjson11_deserializeOpDescribeApp) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeArtifact) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeApp) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9431,9 +9412,9 @@ func (m *awsAwsjson11_deserializeOpDescribeArtifact) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeArtifact(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeApp(response, &metadata) } - output := &DescribeArtifactOutput{} + output := &DescribeAppOutput{} out.Result = output var buff [1024]byte @@ -9453,7 +9434,7 @@ func (m *awsAwsjson11_deserializeOpDescribeArtifact) HandleDeserialize(ctx conte return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribeArtifactOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeAppOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9467,7 +9448,7 @@ func (m *awsAwsjson11_deserializeOpDescribeArtifact) HandleDeserialize(ctx conte return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribeArtifact(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribeApp(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: 
fmt.Errorf("failed to copy error response body, %w", err)} @@ -9521,14 +9502,14 @@ func awsAwsjson11_deserializeOpErrorDescribeArtifact(response *smithyhttp.Respon } } -type awsAwsjson11_deserializeOpDescribeAutoMLJob struct { +type awsAwsjson11_deserializeOpDescribeAppImageConfig struct { } -func (*awsAwsjson11_deserializeOpDescribeAutoMLJob) ID() string { +func (*awsAwsjson11_deserializeOpDescribeAppImageConfig) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeAutoMLJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeAppImageConfig) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9542,9 +9523,9 @@ func (m *awsAwsjson11_deserializeOpDescribeAutoMLJob) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeAutoMLJob(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeAppImageConfig(response, &metadata) } - output := &DescribeAutoMLJobOutput{} + output := &DescribeAppImageConfigOutput{} out.Result = output var buff [1024]byte @@ -9564,7 +9545,7 @@ func (m *awsAwsjson11_deserializeOpDescribeAutoMLJob) HandleDeserialize(ctx cont return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribeAutoMLJobOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeAppImageConfigOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9578,7 +9559,7 @@ func (m *awsAwsjson11_deserializeOpDescribeAutoMLJob) HandleDeserialize(ctx cont return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribeAutoMLJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribeAppImageConfig(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9632,14 +9613,14 @@ func awsAwsjson11_deserializeOpErrorDescribeAutoMLJob(response *smithyhttp.Respo } } -type awsAwsjson11_deserializeOpDescribeCodeRepository struct { +type awsAwsjson11_deserializeOpDescribeArtifact struct { } -func (*awsAwsjson11_deserializeOpDescribeCodeRepository) ID() string { +func (*awsAwsjson11_deserializeOpDescribeArtifact) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeCodeRepository) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeArtifact) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9653,9 +9634,9 @@ func (m *awsAwsjson11_deserializeOpDescribeCodeRepository) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeCodeRepository(response, &metadata) + return out, metadata, 
awsAwsjson11_deserializeOpErrorDescribeArtifact(response, &metadata) } - output := &DescribeCodeRepositoryOutput{} + output := &DescribeArtifactOutput{} out.Result = output var buff [1024]byte @@ -9675,7 +9656,7 @@ func (m *awsAwsjson11_deserializeOpDescribeCodeRepository) HandleDeserialize(ctx return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribeCodeRepositoryOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeArtifactOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9689,7 +9670,7 @@ func (m *awsAwsjson11_deserializeOpDescribeCodeRepository) HandleDeserialize(ctx return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribeCodeRepository(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribeArtifact(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -9730,6 +9711,9 @@ func awsAwsjson11_deserializeOpErrorDescribeCodeRepository(response *smithyhttp. } switch { + case strings.EqualFold("ResourceNotFound", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -9740,14 +9724,14 @@ func awsAwsjson11_deserializeOpErrorDescribeCodeRepository(response *smithyhttp. } } -type awsAwsjson11_deserializeOpDescribeCompilationJob struct { +type awsAwsjson11_deserializeOpDescribeAutoMLJob struct { } -func (*awsAwsjson11_deserializeOpDescribeCompilationJob) ID() string { +func (*awsAwsjson11_deserializeOpDescribeAutoMLJob) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeCompilationJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeAutoMLJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -9761,9 +9745,9 @@ func (m *awsAwsjson11_deserializeOpDescribeCompilationJob) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeCompilationJob(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeAutoMLJob(response, &metadata) } - output := &DescribeCompilationJobOutput{} + output := &DescribeAutoMLJobOutput{} out.Result = output var buff [1024]byte @@ -9783,7 +9767,7 @@ func (m *awsAwsjson11_deserializeOpDescribeCompilationJob) HandleDeserialize(ctx return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribeCompilationJobOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeAutoMLJobOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -9797,7 +9781,226 @@ func (m *awsAwsjson11_deserializeOpDescribeCompilationJob) HandleDeserialize(ctx return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribeCompilationJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribeAutoMLJob(response *smithyhttp.Response, metadata 
*middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ResourceNotFound", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeCodeRepository struct { +} + +func (*awsAwsjson11_deserializeOpDescribeCodeRepository) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeCodeRepository) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeCodeRepository(response, &metadata) + } + output := &DescribeCodeRepositoryOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeCodeRepositoryOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeCodeRepository(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := 
bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeCompilationJob struct { +} + +func (*awsAwsjson11_deserializeOpDescribeCompilationJob) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeCompilationJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeCompilationJob(response, &metadata) + } + output := &DescribeCompilationJobOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeCompilationJobOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeCompilationJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13157,14 +13360,14 @@ func awsAwsjson11_deserializeOpErrorDescribeProject(response *smithyhttp.Respons } } -type awsAwsjson11_deserializeOpDescribeSubscribedWorkteam struct { +type awsAwsjson11_deserializeOpDescribeStudioLifecycleConfig struct { } -func (*awsAwsjson11_deserializeOpDescribeSubscribedWorkteam) ID() string { +func (*awsAwsjson11_deserializeOpDescribeStudioLifecycleConfig) ID() string { 
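
// NOTE (editor sketch, not generated code): every awsAwsjson11_deserializeOpError* function in this
// hunk follows the same shape; only the modeled error cases in the final switch differ per operation.
// The operation name "DescribeWidget" below is a hypothetical placeholder, and the fragment assumes the
// file's existing imports (bytes, fmt, io, json, strings, smithy, smithyio, smithyhttp, restjson, middleware).
func awsAwsjson11_deserializeOpErrorDescribeWidget(response *smithyhttp.Response, metadata *middleware.Metadata) error {
    // Buffer the error body so it can be re-read after probing for the error code.
    var errorBuffer bytes.Buffer
    if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
        return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
    }
    errorBody := bytes.NewReader(errorBuffer.Bytes())

    // Start from the X-Amzn-ErrorType header, then let the JSON body refine code and message.
    errorCode := "UnknownError"
    errorMessage := errorCode
    if headerCode := response.Header.Get("X-Amzn-ErrorType"); len(headerCode) != 0 {
        errorCode = restjson.SanitizeErrorCode(headerCode)
    }

    var buff [1024]byte
    ringBuffer := smithyio.NewRingBuffer(buff[:])
    decoder := json.NewDecoder(io.TeeReader(errorBody, ringBuffer))
    decoder.UseNumber()
    code, message, err := restjson.GetErrorInfo(decoder)
    if err != nil {
        var snapshot bytes.Buffer
        io.Copy(&snapshot, ringBuffer)
        return &smithy.DeserializationError{
            Err:      fmt.Errorf("failed to decode response body, %w", err),
            Snapshot: snapshot.Bytes(),
        }
    }
    errorBody.Seek(0, io.SeekStart)
    if len(code) != 0 {
        errorCode = restjson.SanitizeErrorCode(code)
    }
    if len(message) != 0 {
        errorMessage = message
    }

    // Only this switch is operation-specific; adding or removing a modeled error is the
    // kind of change this diff makes repeatedly.
    switch {
    case strings.EqualFold("ResourceNotFound", errorCode):
        return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody)
    default:
        return &smithy.GenericAPIError{Code: errorCode, Message: errorMessage}
    }
}
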
return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeSubscribedWorkteam) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeStudioLifecycleConfig) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13178,9 +13381,9 @@ func (m *awsAwsjson11_deserializeOpDescribeSubscribedWorkteam) HandleDeserialize } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeSubscribedWorkteam(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeStudioLifecycleConfig(response, &metadata) } - output := &DescribeSubscribedWorkteamOutput{} + output := &DescribeStudioLifecycleConfigOutput{} out.Result = output var buff [1024]byte @@ -13200,7 +13403,7 @@ func (m *awsAwsjson11_deserializeOpDescribeSubscribedWorkteam) HandleDeserialize return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribeSubscribedWorkteamOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeStudioLifecycleConfigOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13214,7 +13417,7 @@ func (m *awsAwsjson11_deserializeOpDescribeSubscribedWorkteam) HandleDeserialize return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribeSubscribedWorkteam(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribeStudioLifecycleConfig(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13255,6 +13458,9 @@ func awsAwsjson11_deserializeOpErrorDescribeSubscribedWorkteam(response *smithyh } switch { + case strings.EqualFold("ResourceNotFound", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -13265,14 +13471,14 @@ func awsAwsjson11_deserializeOpErrorDescribeSubscribedWorkteam(response *smithyh } } -type awsAwsjson11_deserializeOpDescribeTrainingJob struct { +type awsAwsjson11_deserializeOpDescribeSubscribedWorkteam struct { } -func (*awsAwsjson11_deserializeOpDescribeTrainingJob) ID() string { +func (*awsAwsjson11_deserializeOpDescribeSubscribedWorkteam) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeTrainingJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeSubscribedWorkteam) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13286,9 +13492,9 @@ func (m *awsAwsjson11_deserializeOpDescribeTrainingJob) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeTrainingJob(response, &metadata) + return out, metadata, 
awsAwsjson11_deserializeOpErrorDescribeSubscribedWorkteam(response, &metadata) } - output := &DescribeTrainingJobOutput{} + output := &DescribeSubscribedWorkteamOutput{} out.Result = output var buff [1024]byte @@ -13308,7 +13514,7 @@ func (m *awsAwsjson11_deserializeOpDescribeTrainingJob) HandleDeserialize(ctx co return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribeTrainingJobOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeSubscribedWorkteamOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13322,7 +13528,7 @@ func (m *awsAwsjson11_deserializeOpDescribeTrainingJob) HandleDeserialize(ctx co return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribeTrainingJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorDescribeSubscribedWorkteam(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13363,9 +13569,6 @@ func awsAwsjson11_deserializeOpErrorDescribeTrainingJob(response *smithyhttp.Res } switch { - case strings.EqualFold("ResourceNotFound", errorCode): - return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) - default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -13376,14 +13579,14 @@ func awsAwsjson11_deserializeOpErrorDescribeTrainingJob(response *smithyhttp.Res } } -type awsAwsjson11_deserializeOpDescribeTransformJob struct { +type awsAwsjson11_deserializeOpDescribeTrainingJob struct { } -func (*awsAwsjson11_deserializeOpDescribeTransformJob) ID() string { +func (*awsAwsjson11_deserializeOpDescribeTrainingJob) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpDescribeTransformJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpDescribeTrainingJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -13397,9 +13600,9 @@ func (m *awsAwsjson11_deserializeOpDescribeTransformJob) HandleDeserialize(ctx c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorDescribeTransformJob(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeTrainingJob(response, &metadata) } - output := &DescribeTransformJobOutput{} + output := &DescribeTrainingJobOutput{} out.Result = output var buff [1024]byte @@ -13419,7 +13622,7 @@ func (m *awsAwsjson11_deserializeOpDescribeTransformJob) HandleDeserialize(ctx c return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentDescribeTransformJobOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentDescribeTrainingJobOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -13433,7 +13636,118 @@ func (m *awsAwsjson11_deserializeOpDescribeTransformJob) HandleDeserialize(ctx c return out, metadata, err } -func awsAwsjson11_deserializeOpErrorDescribeTransformJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func 
awsAwsjson11_deserializeOpErrorDescribeTrainingJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ResourceNotFound", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeTransformJob struct { +} + +func (*awsAwsjson11_deserializeOpDescribeTransformJob) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeTransformJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeTransformJob(response, &metadata) + } + output := &DescribeTransformJobOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeTransformJobOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeTransformJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: 
fmt.Errorf("failed to copy error response body, %w", err)} @@ -19469,14 +19783,14 @@ func awsAwsjson11_deserializeOpErrorListProjects(response *smithyhttp.Response, } } -type awsAwsjson11_deserializeOpListSubscribedWorkteams struct { +type awsAwsjson11_deserializeOpListStudioLifecycleConfigs struct { } -func (*awsAwsjson11_deserializeOpListSubscribedWorkteams) ID() string { +func (*awsAwsjson11_deserializeOpListStudioLifecycleConfigs) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListSubscribedWorkteams) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListStudioLifecycleConfigs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -19490,9 +19804,9 @@ func (m *awsAwsjson11_deserializeOpListSubscribedWorkteams) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListSubscribedWorkteams(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListStudioLifecycleConfigs(response, &metadata) } - output := &ListSubscribedWorkteamsOutput{} + output := &ListStudioLifecycleConfigsOutput{} out.Result = output var buff [1024]byte @@ -19512,7 +19826,7 @@ func (m *awsAwsjson11_deserializeOpListSubscribedWorkteams) HandleDeserialize(ct return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListSubscribedWorkteamsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListStudioLifecycleConfigsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -19526,7 +19840,7 @@ func (m *awsAwsjson11_deserializeOpListSubscribedWorkteams) HandleDeserialize(ct return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListSubscribedWorkteams(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListStudioLifecycleConfigs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -19567,6 +19881,9 @@ func awsAwsjson11_deserializeOpErrorListSubscribedWorkteams(response *smithyhttp } switch { + case strings.EqualFold("ResourceInUse", errorCode): + return awsAwsjson11_deserializeErrorResourceInUse(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -19577,14 +19894,14 @@ func awsAwsjson11_deserializeOpErrorListSubscribedWorkteams(response *smithyhttp } } -type awsAwsjson11_deserializeOpListTags struct { +type awsAwsjson11_deserializeOpListSubscribedWorkteams struct { } -func (*awsAwsjson11_deserializeOpListTags) ID() string { +func (*awsAwsjson11_deserializeOpListSubscribedWorkteams) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListTags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListSubscribedWorkteams) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { 
out, metadata, err = next.HandleDeserialize(ctx, in) @@ -19598,9 +19915,9 @@ func (m *awsAwsjson11_deserializeOpListTags) HandleDeserialize(ctx context.Conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListTags(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListSubscribedWorkteams(response, &metadata) } - output := &ListTagsOutput{} + output := &ListSubscribedWorkteamsOutput{} out.Result = output var buff [1024]byte @@ -19620,7 +19937,7 @@ func (m *awsAwsjson11_deserializeOpListTags) HandleDeserialize(ctx context.Conte return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListTagsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListSubscribedWorkteamsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -19634,7 +19951,7 @@ func (m *awsAwsjson11_deserializeOpListTags) HandleDeserialize(ctx context.Conte return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListTags(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListSubscribedWorkteams(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -19685,14 +20002,14 @@ func awsAwsjson11_deserializeOpErrorListTags(response *smithyhttp.Response, meta } } -type awsAwsjson11_deserializeOpListTrainingJobs struct { +type awsAwsjson11_deserializeOpListTags struct { } -func (*awsAwsjson11_deserializeOpListTrainingJobs) ID() string { +func (*awsAwsjson11_deserializeOpListTags) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListTrainingJobs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListTags) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -19706,9 +20023,9 @@ func (m *awsAwsjson11_deserializeOpListTrainingJobs) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListTrainingJobs(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListTags(response, &metadata) } - output := &ListTrainingJobsOutput{} + output := &ListTagsOutput{} out.Result = output var buff [1024]byte @@ -19728,7 +20045,7 @@ func (m *awsAwsjson11_deserializeOpListTrainingJobs) HandleDeserialize(ctx conte return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListTrainingJobsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListTagsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -19742,7 +20059,7 @@ func (m *awsAwsjson11_deserializeOpListTrainingJobs) HandleDeserialize(ctx conte return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListTrainingJobs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListTags(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, 
response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -19793,14 +20110,14 @@ func awsAwsjson11_deserializeOpErrorListTrainingJobs(response *smithyhttp.Respon } } -type awsAwsjson11_deserializeOpListTrainingJobsForHyperParameterTuningJob struct { +type awsAwsjson11_deserializeOpListTrainingJobs struct { } -func (*awsAwsjson11_deserializeOpListTrainingJobsForHyperParameterTuningJob) ID() string { +func (*awsAwsjson11_deserializeOpListTrainingJobs) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListTrainingJobsForHyperParameterTuningJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListTrainingJobs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -19814,9 +20131,9 @@ func (m *awsAwsjson11_deserializeOpListTrainingJobsForHyperParameterTuningJob) H } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListTrainingJobsForHyperParameterTuningJob(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListTrainingJobs(response, &metadata) } - output := &ListTrainingJobsForHyperParameterTuningJobOutput{} + output := &ListTrainingJobsOutput{} out.Result = output var buff [1024]byte @@ -19836,7 +20153,7 @@ func (m *awsAwsjson11_deserializeOpListTrainingJobsForHyperParameterTuningJob) H return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListTrainingJobsForHyperParameterTuningJobOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListTrainingJobsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -19850,7 +20167,7 @@ func (m *awsAwsjson11_deserializeOpListTrainingJobsForHyperParameterTuningJob) H return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListTrainingJobsForHyperParameterTuningJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListTrainingJobs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -19891,9 +20208,6 @@ func awsAwsjson11_deserializeOpErrorListTrainingJobsForHyperParameterTuningJob(r } switch { - case strings.EqualFold("ResourceNotFound", errorCode): - return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) - default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -19904,14 +20218,14 @@ func awsAwsjson11_deserializeOpErrorListTrainingJobsForHyperParameterTuningJob(r } } -type awsAwsjson11_deserializeOpListTransformJobs struct { +type awsAwsjson11_deserializeOpListTrainingJobsForHyperParameterTuningJob struct { } -func (*awsAwsjson11_deserializeOpListTransformJobs) ID() string { +func (*awsAwsjson11_deserializeOpListTrainingJobsForHyperParameterTuningJob) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListTransformJobs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m 
*awsAwsjson11_deserializeOpListTrainingJobsForHyperParameterTuningJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -19925,9 +20239,9 @@ func (m *awsAwsjson11_deserializeOpListTransformJobs) HandleDeserialize(ctx cont } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListTransformJobs(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListTrainingJobsForHyperParameterTuningJob(response, &metadata) } - output := &ListTransformJobsOutput{} + output := &ListTrainingJobsForHyperParameterTuningJobOutput{} out.Result = output var buff [1024]byte @@ -19947,7 +20261,7 @@ func (m *awsAwsjson11_deserializeOpListTransformJobs) HandleDeserialize(ctx cont return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListTransformJobsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListTrainingJobsForHyperParameterTuningJobOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -19961,7 +20275,7 @@ func (m *awsAwsjson11_deserializeOpListTransformJobs) HandleDeserialize(ctx cont return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListTransformJobs(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListTrainingJobsForHyperParameterTuningJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20002,6 +20316,9 @@ func awsAwsjson11_deserializeOpErrorListTransformJobs(response *smithyhttp.Respo } switch { + case strings.EqualFold("ResourceNotFound", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -20012,14 +20329,14 @@ func awsAwsjson11_deserializeOpErrorListTransformJobs(response *smithyhttp.Respo } } -type awsAwsjson11_deserializeOpListTrialComponents struct { +type awsAwsjson11_deserializeOpListTransformJobs struct { } -func (*awsAwsjson11_deserializeOpListTrialComponents) ID() string { +func (*awsAwsjson11_deserializeOpListTransformJobs) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListTrialComponents) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListTransformJobs) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20033,9 +20350,9 @@ func (m *awsAwsjson11_deserializeOpListTrialComponents) HandleDeserialize(ctx co } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListTrialComponents(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListTransformJobs(response, &metadata) } - output := &ListTrialComponentsOutput{} + output := &ListTransformJobsOutput{} out.Result = output var buff [1024]byte @@ -20055,7 +20372,7 @@ func (m 
*awsAwsjson11_deserializeOpListTrialComponents) HandleDeserialize(ctx co return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListTrialComponentsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListTransformJobsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -20069,7 +20386,7 @@ func (m *awsAwsjson11_deserializeOpListTrialComponents) HandleDeserialize(ctx co return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListTrialComponents(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListTransformJobs(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20110,9 +20427,6 @@ func awsAwsjson11_deserializeOpErrorListTrialComponents(response *smithyhttp.Res } switch { - case strings.EqualFold("ResourceNotFound", errorCode): - return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) - default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -20123,14 +20437,14 @@ func awsAwsjson11_deserializeOpErrorListTrialComponents(response *smithyhttp.Res } } -type awsAwsjson11_deserializeOpListTrials struct { +type awsAwsjson11_deserializeOpListTrialComponents struct { } -func (*awsAwsjson11_deserializeOpListTrials) ID() string { +func (*awsAwsjson11_deserializeOpListTrialComponents) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListTrials) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListTrialComponents) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20144,9 +20458,9 @@ func (m *awsAwsjson11_deserializeOpListTrials) HandleDeserialize(ctx context.Con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListTrials(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListTrialComponents(response, &metadata) } - output := &ListTrialsOutput{} + output := &ListTrialComponentsOutput{} out.Result = output var buff [1024]byte @@ -20166,7 +20480,7 @@ func (m *awsAwsjson11_deserializeOpListTrials) HandleDeserialize(ctx context.Con return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListTrialsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListTrialComponentsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -20180,7 +20494,7 @@ func (m *awsAwsjson11_deserializeOpListTrials) HandleDeserialize(ctx context.Con return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListTrials(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListTrialComponents(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20234,14 +20548,14 @@ func 
awsAwsjson11_deserializeOpErrorListTrials(response *smithyhttp.Response, me } } -type awsAwsjson11_deserializeOpListUserProfiles struct { +type awsAwsjson11_deserializeOpListTrials struct { } -func (*awsAwsjson11_deserializeOpListUserProfiles) ID() string { +func (*awsAwsjson11_deserializeOpListTrials) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListUserProfiles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListTrials) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20255,9 +20569,9 @@ func (m *awsAwsjson11_deserializeOpListUserProfiles) HandleDeserialize(ctx conte } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListUserProfiles(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListTrials(response, &metadata) } - output := &ListUserProfilesOutput{} + output := &ListTrialsOutput{} out.Result = output var buff [1024]byte @@ -20277,7 +20591,7 @@ func (m *awsAwsjson11_deserializeOpListUserProfiles) HandleDeserialize(ctx conte return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListUserProfilesOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListTrialsOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -20291,7 +20605,7 @@ func (m *awsAwsjson11_deserializeOpListUserProfiles) HandleDeserialize(ctx conte return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListUserProfiles(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListTrials(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20332,6 +20646,9 @@ func awsAwsjson11_deserializeOpErrorListUserProfiles(response *smithyhttp.Respon } switch { + case strings.EqualFold("ResourceNotFound", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -20342,14 +20659,14 @@ func awsAwsjson11_deserializeOpErrorListUserProfiles(response *smithyhttp.Respon } } -type awsAwsjson11_deserializeOpListWorkforces struct { +type awsAwsjson11_deserializeOpListUserProfiles struct { } -func (*awsAwsjson11_deserializeOpListWorkforces) ID() string { +func (*awsAwsjson11_deserializeOpListUserProfiles) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListWorkforces) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListUserProfiles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20363,9 +20680,9 @@ func (m *awsAwsjson11_deserializeOpListWorkforces) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, 
awsAwsjson11_deserializeOpErrorListWorkforces(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListUserProfiles(response, &metadata) } - output := &ListWorkforcesOutput{} + output := &ListUserProfilesOutput{} out.Result = output var buff [1024]byte @@ -20385,7 +20702,7 @@ func (m *awsAwsjson11_deserializeOpListWorkforces) HandleDeserialize(ctx context return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListWorkforcesOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListUserProfilesOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -20399,7 +20716,7 @@ func (m *awsAwsjson11_deserializeOpListWorkforces) HandleDeserialize(ctx context return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListWorkforces(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListUserProfiles(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20450,14 +20767,14 @@ func awsAwsjson11_deserializeOpErrorListWorkforces(response *smithyhttp.Response } } -type awsAwsjson11_deserializeOpListWorkteams struct { +type awsAwsjson11_deserializeOpListWorkforces struct { } -func (*awsAwsjson11_deserializeOpListWorkteams) ID() string { +func (*awsAwsjson11_deserializeOpListWorkforces) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpListWorkteams) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpListWorkforces) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20471,9 +20788,9 @@ func (m *awsAwsjson11_deserializeOpListWorkteams) HandleDeserialize(ctx context. } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorListWorkteams(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorListWorkforces(response, &metadata) } - output := &ListWorkteamsOutput{} + output := &ListWorkforcesOutput{} out.Result = output var buff [1024]byte @@ -20493,7 +20810,7 @@ func (m *awsAwsjson11_deserializeOpListWorkteams) HandleDeserialize(ctx context. return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentListWorkteamsOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentListWorkforcesOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -20507,7 +20824,115 @@ func (m *awsAwsjson11_deserializeOpListWorkteams) HandleDeserialize(ctx context. 
return out, metadata, err } -func awsAwsjson11_deserializeOpErrorListWorkteams(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorListWorkforces(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListWorkteams struct { +} + +func (*awsAwsjson11_deserializeOpListWorkteams) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListWorkteams) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListWorkteams(response, &metadata) + } + output := &ListWorkteamsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListWorkteamsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListWorkteams(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response 
body, %w", err)} @@ -20866,14 +21291,14 @@ func awsAwsjson11_deserializeOpErrorRenderUiTemplate(response *smithyhttp.Respon } } -type awsAwsjson11_deserializeOpSearch struct { +type awsAwsjson11_deserializeOpRetryPipelineExecution struct { } -func (*awsAwsjson11_deserializeOpSearch) ID() string { +func (*awsAwsjson11_deserializeOpRetryPipelineExecution) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpSearch) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpRetryPipelineExecution) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20887,9 +21312,9 @@ func (m *awsAwsjson11_deserializeOpSearch) HandleDeserialize(ctx context.Context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorSearch(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorRetryPipelineExecution(response, &metadata) } - output := &SearchOutput{} + output := &RetryPipelineExecutionOutput{} out.Result = output var buff [1024]byte @@ -20909,7 +21334,7 @@ func (m *awsAwsjson11_deserializeOpSearch) HandleDeserialize(ctx context.Context return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentSearchOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentRetryPipelineExecutionOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -20923,7 +21348,7 @@ func (m *awsAwsjson11_deserializeOpSearch) HandleDeserialize(ctx context.Context return out, metadata, err } -func awsAwsjson11_deserializeOpErrorSearch(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorRetryPipelineExecution(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20964,6 +21389,15 @@ func awsAwsjson11_deserializeOpErrorSearch(response *smithyhttp.Response, metada } switch { + case strings.EqualFold("ConflictException", errorCode): + return awsAwsjson11_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("ResourceLimitExceeded", errorCode): + return awsAwsjson11_deserializeErrorResourceLimitExceeded(response, errorBody) + + case strings.EqualFold("ResourceNotFound", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -20974,14 +21408,14 @@ func awsAwsjson11_deserializeOpErrorSearch(response *smithyhttp.Response, metada } } -type awsAwsjson11_deserializeOpSendPipelineExecutionStepFailure struct { +type awsAwsjson11_deserializeOpSearch struct { } -func (*awsAwsjson11_deserializeOpSendPipelineExecutionStepFailure) ID() string { +func (*awsAwsjson11_deserializeOpSearch) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpSendPipelineExecutionStepFailure) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpSearch) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -20995,9 +21429,9 @@ func (m *awsAwsjson11_deserializeOpSendPipelineExecutionStepFailure) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorSendPipelineExecutionStepFailure(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorSearch(response, &metadata) } - output := &SendPipelineExecutionStepFailureOutput{} + output := &SearchOutput{} out.Result = output var buff [1024]byte @@ -21017,7 +21451,7 @@ func (m *awsAwsjson11_deserializeOpSendPipelineExecutionStepFailure) HandleDeser return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentSendPipelineExecutionStepFailureOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentSearchOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -21031,7 +21465,7 @@ func (m *awsAwsjson11_deserializeOpSendPipelineExecutionStepFailure) HandleDeser return out, metadata, err } -func awsAwsjson11_deserializeOpErrorSendPipelineExecutionStepFailure(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorSearch(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -21072,12 +21506,6 @@ func awsAwsjson11_deserializeOpErrorSendPipelineExecutionStepFailure(response *s } switch { - case strings.EqualFold("ResourceLimitExceeded", errorCode): - return awsAwsjson11_deserializeErrorResourceLimitExceeded(response, errorBody) - - case strings.EqualFold("ResourceNotFound", errorCode): - return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) - default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -21088,14 +21516,14 @@ func awsAwsjson11_deserializeOpErrorSendPipelineExecutionStepFailure(response *s } } -type awsAwsjson11_deserializeOpSendPipelineExecutionStepSuccess struct { +type awsAwsjson11_deserializeOpSendPipelineExecutionStepFailure struct { } -func (*awsAwsjson11_deserializeOpSendPipelineExecutionStepSuccess) ID() string { +func (*awsAwsjson11_deserializeOpSendPipelineExecutionStepFailure) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpSendPipelineExecutionStepSuccess) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpSendPipelineExecutionStepFailure) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -21109,9 +21537,9 @@ func (m *awsAwsjson11_deserializeOpSendPipelineExecutionStepSuccess) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorSendPipelineExecutionStepSuccess(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorSendPipelineExecutionStepFailure(response, &metadata) } - output := &SendPipelineExecutionStepSuccessOutput{} + output := &SendPipelineExecutionStepFailureOutput{} 
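
// NOTE (editor sketch, not generated code): two output-handling variants appear in this region.
// Operations whose responses have no modeled fields simply drain the body, while operations with
// modeled outputs decode it; the surrounding hunk shows SendPipelineExecutionStepSuccess switching
// from the former to the latter. The fragment below assumes the enclosing HandleDeserialize scope
// (out, metadata, response, err), and StartWidgetOutput / DescribeWidgetOutput /
// awsAwsjson11_deserializeOpDocumentDescribeWidgetOutput are hypothetical placeholders.

// Variant 1: empty output shape, the body is discarded.
output := &StartWidgetOutput{}
out.Result = output
if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
    return out, metadata, &smithy.DeserializationError{
        Err: fmt.Errorf("failed to discard response body, %w", err),
    }
}

// Variant 2: modeled output shape, the body is JSON-decoded and mapped onto the struct.
decodedOutput := &DescribeWidgetOutput{}
out.Result = decodedOutput
decoder := json.NewDecoder(response.Body)
decoder.UseNumber()
var shape interface{}
if err := decoder.Decode(&shape); err != nil && err != io.EOF {
    return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response body, %w", err)}
}
err = awsAwsjson11_deserializeOpDocumentDescribeWidgetOutput(&decodedOutput, shape)
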
out.Result = output var buff [1024]byte @@ -21131,7 +21559,7 @@ func (m *awsAwsjson11_deserializeOpSendPipelineExecutionStepSuccess) HandleDeser return out, metadata, err } - err = awsAwsjson11_deserializeOpDocumentSendPipelineExecutionStepSuccessOutput(&output, shape) + err = awsAwsjson11_deserializeOpDocumentSendPipelineExecutionStepFailureOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -21145,7 +21573,7 @@ func (m *awsAwsjson11_deserializeOpSendPipelineExecutionStepSuccess) HandleDeser return out, metadata, err } -func awsAwsjson11_deserializeOpErrorSendPipelineExecutionStepSuccess(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorSendPipelineExecutionStepFailure(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -21202,14 +21630,14 @@ func awsAwsjson11_deserializeOpErrorSendPipelineExecutionStepSuccess(response *s } } -type awsAwsjson11_deserializeOpStartMonitoringSchedule struct { +type awsAwsjson11_deserializeOpSendPipelineExecutionStepSuccess struct { } -func (*awsAwsjson11_deserializeOpStartMonitoringSchedule) ID() string { +func (*awsAwsjson11_deserializeOpSendPipelineExecutionStepSuccess) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpStartMonitoringSchedule) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpSendPipelineExecutionStepSuccess) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -21223,21 +21651,43 @@ func (m *awsAwsjson11_deserializeOpStartMonitoringSchedule) HandleDeserialize(ct } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorStartMonitoringSchedule(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorSendPipelineExecutionStepSuccess(response, &metadata) } - output := &StartMonitoringScheduleOutput{} + output := &SendPipelineExecutionStepSuccessOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentSendPipelineExecutionStepSuccessOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } + return out, metadata, err } return out, metadata, err } -func 
awsAwsjson11_deserializeOpErrorStartMonitoringSchedule(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorSendPipelineExecutionStepSuccess(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -21278,6 +21728,9 @@ func awsAwsjson11_deserializeOpErrorStartMonitoringSchedule(response *smithyhttp } switch { + case strings.EqualFold("ResourceLimitExceeded", errorCode): + return awsAwsjson11_deserializeErrorResourceLimitExceeded(response, errorBody) + case strings.EqualFold("ResourceNotFound", errorCode): return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) @@ -21291,14 +21744,14 @@ func awsAwsjson11_deserializeOpErrorStartMonitoringSchedule(response *smithyhttp } } -type awsAwsjson11_deserializeOpStartNotebookInstance struct { +type awsAwsjson11_deserializeOpStartMonitoringSchedule struct { } -func (*awsAwsjson11_deserializeOpStartNotebookInstance) ID() string { +func (*awsAwsjson11_deserializeOpStartMonitoringSchedule) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpStartNotebookInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpStartMonitoringSchedule) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -21312,9 +21765,9 @@ func (m *awsAwsjson11_deserializeOpStartNotebookInstance) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorStartNotebookInstance(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorStartMonitoringSchedule(response, &metadata) } - output := &StartNotebookInstanceOutput{} + output := &StartMonitoringScheduleOutput{} out.Result = output if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { @@ -21326,7 +21779,7 @@ func (m *awsAwsjson11_deserializeOpStartNotebookInstance) HandleDeserialize(ctx return out, metadata, err } -func awsAwsjson11_deserializeOpErrorStartNotebookInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorStartMonitoringSchedule(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -21367,8 +21820,8 @@ func awsAwsjson11_deserializeOpErrorStartNotebookInstance(response *smithyhttp.R } switch { - case strings.EqualFold("ResourceLimitExceeded", errorCode): - return awsAwsjson11_deserializeErrorResourceLimitExceeded(response, errorBody) + case strings.EqualFold("ResourceNotFound", errorCode): + return awsAwsjson11_deserializeErrorResourceNotFound(response, errorBody) default: genericError := &smithy.GenericAPIError{ @@ -21380,14 +21833,14 @@ func awsAwsjson11_deserializeOpErrorStartNotebookInstance(response *smithyhttp.R } } -type awsAwsjson11_deserializeOpStartPipelineExecution struct { +type awsAwsjson11_deserializeOpStartNotebookInstance struct { } -func 
(*awsAwsjson11_deserializeOpStartPipelineExecution) ID() string { +func (*awsAwsjson11_deserializeOpStartNotebookInstance) ID() string { return "OperationDeserializer" } -func (m *awsAwsjson11_deserializeOpStartPipelineExecution) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsjson11_deserializeOpStartNotebookInstance) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -21401,43 +21854,132 @@ func (m *awsAwsjson11_deserializeOpStartPipelineExecution) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsjson11_deserializeOpErrorStartPipelineExecution(response, &metadata) + return out, metadata, awsAwsjson11_deserializeOpErrorStartNotebookInstance(response, &metadata) } - output := &StartPipelineExecutionOutput{} + output := &StartNotebookInstanceOutput{} out.Result = output - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - - body := io.TeeReader(response.Body, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return out, metadata, err - } - - err = awsAwsjson11_deserializeOpDocumentStartPipelineExecutionOutput(&output, shape) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), } - return out, metadata, err } return out, metadata, err } -func awsAwsjson11_deserializeOpErrorStartPipelineExecution(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsjson11_deserializeOpErrorStartNotebookInstance(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("ResourceLimitExceeded", errorCode): + 
return awsAwsjson11_deserializeErrorResourceLimitExceeded(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpStartPipelineExecution struct { +} + +func (*awsAwsjson11_deserializeOpStartPipelineExecution) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpStartPipelineExecution) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorStartPipelineExecution(response, &metadata) + } + output := &StartPipelineExecutionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentStartPipelineExecutionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorStartPipelineExecution(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -36696,6 +37238,11 @@ func awsAwsjson11_deserializeDocumentJupyterServerAppSettings(v **types.JupyterS return err } + case "LifecycleConfigArns": + if err := awsAwsjson11_deserializeDocumentLifecycleConfigArns(&sv.LifecycleConfigArns, value); err != nil { + return err + } + default: _, _ = key, value @@ -36737,6 +37284,11 @@ func awsAwsjson11_deserializeDocumentKernelGatewayAppSettings(v **types.KernelGa return err } + case "LifecycleConfigArns": + if err := awsAwsjson11_deserializeDocumentLifecycleConfigArns(&sv.LifecycleConfigArns, value); err != nil { + return err + } + default: _, _ = key, value @@ -37838,6 +38390,42 @@ func awsAwsjson11_deserializeDocumentLambdaStepMetadata(v **types.LambdaStepMeta return nil } +func awsAwsjson11_deserializeDocumentLifecycleConfigArns(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != 
nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StudioLifecycleConfigArn to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsAwsjson11_deserializeDocumentLineageEntityParameters(v *map[string]string, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -45916,6 +46504,118 @@ func awsAwsjson11_deserializeDocumentProfilingParameters(v *map[string]string, v return nil } +func awsAwsjson11_deserializeDocumentProject(v **types.Project, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Project + if *v == nil { + sv = &types.Project{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CreatedBy": + if err := awsAwsjson11_deserializeDocumentUserContext(&sv.CreatedBy, value); err != nil { + return err + } + + case "CreationTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "ProjectArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ProjectArn to be of type string, got %T instead", value) + } + sv.ProjectArn = ptr.String(jtv) + } + + case "ProjectDescription": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EntityDescription to be of type string, got %T instead", value) + } + sv.ProjectDescription = ptr.String(jtv) + } + + case "ProjectId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ProjectId to be of type string, got %T instead", value) + } + sv.ProjectId = ptr.String(jtv) + } + + case "ProjectName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ProjectEntityName to be of type string, got %T instead", value) + } + sv.ProjectName = ptr.String(jtv) + } + + case "ProjectStatus": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ProjectStatus to be of type string, got %T instead", value) + } + sv.ProjectStatus = types.ProjectStatus(jtv) + } + + case "ServiceCatalogProvisionedProductDetails": + if err := awsAwsjson11_deserializeDocumentServiceCatalogProvisionedProductDetails(&sv.ServiceCatalogProvisionedProductDetails, value); err != nil { + return err + } + + case "ServiceCatalogProvisioningDetails": + if err := awsAwsjson11_deserializeDocumentServiceCatalogProvisioningDetails(&sv.ServiceCatalogProvisioningDetails, value); err != nil { + return err + } + + case "Tags": + if err := awsAwsjson11_deserializeDocumentTagList(&sv.Tags, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentProjectSummary(v **types.ProjectSummary, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -46879,6 +47579,15 @@ func awsAwsjson11_deserializeDocumentResourceSpec(v **types.ResourceSpec, value sv.InstanceType = types.AppInstanceType(jtv) } + case "LifecycleConfigArn": + if value != nil { + jtv, ok := value.(string) + if !ok 
{ + return fmt.Errorf("expected StudioLifecycleConfigArn to be of type string, got %T instead", value) + } + sv.LifecycleConfigArn = ptr.String(jtv) + } + case "SageMakerImageArn": if value != nil { jtv, ok := value.(string) @@ -47240,6 +47949,11 @@ func awsAwsjson11_deserializeDocumentSearchRecord(v **types.SearchRecord, value return err } + case "Project": + if err := awsAwsjson11_deserializeDocumentProject(&sv.Project, value); err != nil { + return err + } + case "TrainingJob": if err := awsAwsjson11_deserializeDocumentTrainingJob(&sv.TrainingJob, value); err != nil { return err @@ -47875,6 +48589,130 @@ func awsAwsjson11_deserializeDocumentStoppingCondition(v **types.StoppingConditi return nil } +func awsAwsjson11_deserializeDocumentStudioLifecycleConfigDetails(v **types.StudioLifecycleConfigDetails, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.StudioLifecycleConfigDetails + if *v == nil { + sv = &types.StudioLifecycleConfigDetails{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CreationTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "LastModifiedTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastModifiedTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "StudioLifecycleConfigAppType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StudioLifecycleConfigAppType to be of type string, got %T instead", value) + } + sv.StudioLifecycleConfigAppType = types.StudioLifecycleConfigAppType(jtv) + } + + case "StudioLifecycleConfigArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StudioLifecycleConfigArn to be of type string, got %T instead", value) + } + sv.StudioLifecycleConfigArn = ptr.String(jtv) + } + + case "StudioLifecycleConfigName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StudioLifecycleConfigName to be of type string, got %T instead", value) + } + sv.StudioLifecycleConfigName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentStudioLifecycleConfigsList(v *[]types.StudioLifecycleConfigDetails, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.StudioLifecycleConfigDetails + if *v == nil { + cv = []types.StudioLifecycleConfigDetails{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.StudioLifecycleConfigDetails + destAddr := &col + if err := awsAwsjson11_deserializeDocumentStudioLifecycleConfigDetails(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv 
+ return nil +} + func awsAwsjson11_deserializeDocumentSubnets(v *[]string, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -53853,6 +54691,46 @@ func awsAwsjson11_deserializeOpDocumentCreateProjectOutput(v **CreateProjectOutp return nil } +func awsAwsjson11_deserializeOpDocumentCreateStudioLifecycleConfigOutput(v **CreateStudioLifecycleConfigOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateStudioLifecycleConfigOutput + if *v == nil { + sv = &CreateStudioLifecycleConfigOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "StudioLifecycleConfigArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StudioLifecycleConfigArn to be of type string, got %T instead", value) + } + sv.StudioLifecycleConfigArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeOpDocumentCreateTrainingJobOutput(v **CreateTrainingJobOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -59663,6 +60541,105 @@ func awsAwsjson11_deserializeOpDocumentDescribeProjectOutput(v **DescribeProject return nil } +func awsAwsjson11_deserializeOpDocumentDescribeStudioLifecycleConfigOutput(v **DescribeStudioLifecycleConfigOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DescribeStudioLifecycleConfigOutput + if *v == nil { + sv = &DescribeStudioLifecycleConfigOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CreationTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "LastModifiedTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastModifiedTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "StudioLifecycleConfigAppType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StudioLifecycleConfigAppType to be of type string, got %T instead", value) + } + sv.StudioLifecycleConfigAppType = types.StudioLifecycleConfigAppType(jtv) + } + + case "StudioLifecycleConfigArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StudioLifecycleConfigArn to be of type string, got %T instead", value) + } + sv.StudioLifecycleConfigArn = ptr.String(jtv) + } + + case "StudioLifecycleConfigContent": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StudioLifecycleConfigContent to be of type string, got %T instead", value) + } + sv.StudioLifecycleConfigContent = ptr.String(jtv) + } + + case "StudioLifecycleConfigName": + if value != nil { + 
jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected StudioLifecycleConfigName to be of type string, got %T instead", value) + } + sv.StudioLifecycleConfigName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeOpDocumentDescribeSubscribedWorkteamOutput(v **DescribeSubscribedWorkteamOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -63008,6 +63985,51 @@ func awsAwsjson11_deserializeOpDocumentListProjectsOutput(v **ListProjectsOutput return nil } +func awsAwsjson11_deserializeOpDocumentListStudioLifecycleConfigsOutput(v **ListStudioLifecycleConfigsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListStudioLifecycleConfigsOutput + if *v == nil { + sv = &ListStudioLifecycleConfigsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "StudioLifecycleConfigs": + if err := awsAwsjson11_deserializeDocumentStudioLifecycleConfigsList(&sv.StudioLifecycleConfigs, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeOpDocumentListSubscribedWorkteamsOutput(v **ListSubscribedWorkteamsOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -63543,6 +64565,46 @@ func awsAwsjson11_deserializeOpDocumentRenderUiTemplateOutput(v **RenderUiTempla return nil } +func awsAwsjson11_deserializeOpDocumentRetryPipelineExecutionOutput(v **RetryPipelineExecutionOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *RetryPipelineExecutionOutput + if *v == nil { + sv = &RetryPipelineExecutionOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "PipelineExecutionArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PipelineExecutionArn to be of type string, got %T instead", value) + } + sv.PipelineExecutionArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeOpDocumentSearchOutput(v **SearchOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/sagemaker/generated.json b/service/sagemaker/generated.json index 2dce11e1182..49481379b20 100644 --- a/service/sagemaker/generated.json +++ b/service/sagemaker/generated.json @@ -46,6 +46,7 @@ "api_op_CreatePresignedNotebookInstanceUrl.go", "api_op_CreateProcessingJob.go", "api_op_CreateProject.go", + "api_op_CreateStudioLifecycleConfig.go", "api_op_CreateTrainingJob.go", "api_op_CreateTransformJob.go", "api_op_CreateTrial.go", @@ -84,6 +85,7 @@ "api_op_DeleteNotebookInstanceLifecycleConfig.go", "api_op_DeletePipeline.go", "api_op_DeleteProject.go", + "api_op_DeleteStudioLifecycleConfig.go", "api_op_DeleteTags.go", 
"api_op_DeleteTrial.go", "api_op_DeleteTrialComponent.go", @@ -129,6 +131,7 @@ "api_op_DescribePipelineExecution.go", "api_op_DescribeProcessingJob.go", "api_op_DescribeProject.go", + "api_op_DescribeStudioLifecycleConfig.go", "api_op_DescribeSubscribedWorkteam.go", "api_op_DescribeTrainingJob.go", "api_op_DescribeTransformJob.go", @@ -187,6 +190,7 @@ "api_op_ListPipelines.go", "api_op_ListProcessingJobs.go", "api_op_ListProjects.go", + "api_op_ListStudioLifecycleConfigs.go", "api_op_ListSubscribedWorkteams.go", "api_op_ListTags.go", "api_op_ListTrainingJobs.go", @@ -200,6 +204,7 @@ "api_op_PutModelPackageGroupPolicy.go", "api_op_RegisterDevices.go", "api_op_RenderUiTemplate.go", + "api_op_RetryPipelineExecution.go", "api_op_Search.go", "api_op_SendPipelineExecutionStepFailure.go", "api_op_SendPipelineExecutionStepSuccess.go", diff --git a/service/sagemaker/serializers.go b/service/sagemaker/serializers.go index 61be75e7421..2c362b4d008 100644 --- a/service/sagemaker/serializers.go +++ b/service/sagemaker/serializers.go @@ -1896,6 +1896,53 @@ func (m *awsAwsjson11_serializeOpCreateProject) HandleSerialize(ctx context.Cont return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpCreateStudioLifecycleConfig struct { +} + +func (*awsAwsjson11_serializeOpCreateStudioLifecycleConfig) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateStudioLifecycleConfig) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateStudioLifecycleConfigInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SageMaker.CreateStudioLifecycleConfig") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateStudioLifecycleConfigInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpCreateTrainingJob struct { } @@ -3682,6 +3729,53 @@ func (m *awsAwsjson11_serializeOpDeleteProject) HandleSerialize(ctx context.Cont return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpDeleteStudioLifecycleConfig struct { +} + +func (*awsAwsjson11_serializeOpDeleteStudioLifecycleConfig) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteStudioLifecycleConfig) HandleSerialize(ctx context.Context, in middleware.SerializeInput, 
next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteStudioLifecycleConfigInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SageMaker.DeleteStudioLifecycleConfig") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteStudioLifecycleConfigInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpDeleteTags struct { } @@ -5797,6 +5891,53 @@ func (m *awsAwsjson11_serializeOpDescribeProject) HandleSerialize(ctx context.Co return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpDescribeStudioLifecycleConfig struct { +} + +func (*awsAwsjson11_serializeOpDescribeStudioLifecycleConfig) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeStudioLifecycleConfig) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeStudioLifecycleConfigInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SageMaker.DescribeStudioLifecycleConfig") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeStudioLifecycleConfigInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = 
request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpDescribeSubscribedWorkteam struct { } @@ -8523,6 +8664,53 @@ func (m *awsAwsjson11_serializeOpListProjects) HandleSerialize(ctx context.Conte return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpListStudioLifecycleConfigs struct { +} + +func (*awsAwsjson11_serializeOpListStudioLifecycleConfigs) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListStudioLifecycleConfigs) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListStudioLifecycleConfigsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SageMaker.ListStudioLifecycleConfigs") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListStudioLifecycleConfigsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpListSubscribedWorkteams struct { } @@ -9134,6 +9322,53 @@ func (m *awsAwsjson11_serializeOpRenderUiTemplate) HandleSerialize(ctx context.C return next.HandleSerialize(ctx, in) } +type awsAwsjson11_serializeOpRetryPipelineExecution struct { +} + +func (*awsAwsjson11_serializeOpRetryPipelineExecution) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpRetryPipelineExecution) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RetryPipelineExecutionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + request.Request.URL.Path = "/" + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("SageMaker.RetryPipelineExecution") + 
+ jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentRetryPipelineExecutionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson11_serializeOpSearch struct { } @@ -13678,6 +13913,13 @@ func awsAwsjson11_serializeDocumentJupyterServerAppSettings(v *types.JupyterServ } } + if v.LifecycleConfigArns != nil { + ok := object.Key("LifecycleConfigArns") + if err := awsAwsjson11_serializeDocumentLifecycleConfigArns(v.LifecycleConfigArns, ok); err != nil { + return err + } + } + return nil } @@ -13699,6 +13941,13 @@ func awsAwsjson11_serializeDocumentKernelGatewayAppSettings(v *types.KernelGatew } } + if v.LifecycleConfigArns != nil { + ok := object.Key("LifecycleConfigArns") + if err := awsAwsjson11_serializeDocumentLifecycleConfigArns(v.LifecycleConfigArns, ok); err != nil { + return err + } + } + return nil } @@ -13908,6 +14157,17 @@ func awsAwsjson11_serializeDocumentLabelingJobStoppingConditions(v *types.Labeli return nil } +func awsAwsjson11_serializeDocumentLifecycleConfigArns(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + func awsAwsjson11_serializeDocumentLineageEntityParameters(v map[string]string, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -15994,6 +16254,11 @@ func awsAwsjson11_serializeDocumentResourceSpec(v *types.ResourceSpec, value smi ok.String(string(v.InstanceType)) } + if v.LifecycleConfigArn != nil { + ok := object.Key("LifecycleConfigArn") + ok.String(*v.LifecycleConfigArn) + } + if v.SageMakerImageArn != nil { ok := object.Key("SageMakerImageArn") ok.String(*v.SageMakerImageArn) @@ -18865,6 +19130,35 @@ func awsAwsjson11_serializeOpDocumentCreateProjectInput(v *CreateProjectInput, v return nil } +func awsAwsjson11_serializeOpDocumentCreateStudioLifecycleConfigInput(v *CreateStudioLifecycleConfigInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.StudioLifecycleConfigAppType) > 0 { + ok := object.Key("StudioLifecycleConfigAppType") + ok.String(string(v.StudioLifecycleConfigAppType)) + } + + if v.StudioLifecycleConfigContent != nil { + ok := object.Key("StudioLifecycleConfigContent") + ok.String(*v.StudioLifecycleConfigContent) + } + + if v.StudioLifecycleConfigName != nil { + ok := object.Key("StudioLifecycleConfigName") + ok.String(*v.StudioLifecycleConfigName) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + func awsAwsjson11_serializeOpDocumentCreateTrainingJobInput(v *CreateTrainingJobInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -19749,6 +20043,18 @@ func awsAwsjson11_serializeOpDocumentDeleteProjectInput(v *DeleteProjectInput, v return nil } +func awsAwsjson11_serializeOpDocumentDeleteStudioLifecycleConfigInput(v *DeleteStudioLifecycleConfigInput, value smithyjson.Value) error { + object := value.Object() + defer 
object.Close() + + if v.StudioLifecycleConfigName != nil { + ok := object.Key("StudioLifecycleConfigName") + ok.String(*v.StudioLifecycleConfigName) + } + + return nil +} + func awsAwsjson11_serializeOpDocumentDeleteTagsInput(v *DeleteTagsInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -20343,6 +20649,18 @@ func awsAwsjson11_serializeOpDocumentDescribeProjectInput(v *DescribeProjectInpu return nil } +func awsAwsjson11_serializeOpDocumentDescribeStudioLifecycleConfigInput(v *DescribeStudioLifecycleConfigInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.StudioLifecycleConfigName != nil { + ok := object.Key("StudioLifecycleConfigName") + ok.String(*v.StudioLifecycleConfigName) + } + + return nil +} + func awsAwsjson11_serializeOpDocumentDescribeSubscribedWorkteamInput(v *DescribeSubscribedWorkteamInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -22586,6 +22904,63 @@ func awsAwsjson11_serializeOpDocumentListProjectsInput(v *ListProjectsInput, val return nil } +func awsAwsjson11_serializeOpDocumentListStudioLifecycleConfigsInput(v *ListStudioLifecycleConfigsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.AppTypeEquals) > 0 { + ok := object.Key("AppTypeEquals") + ok.String(string(v.AppTypeEquals)) + } + + if v.CreationTimeAfter != nil { + ok := object.Key("CreationTimeAfter") + ok.Double(smithytime.FormatEpochSeconds(*v.CreationTimeAfter)) + } + + if v.CreationTimeBefore != nil { + ok := object.Key("CreationTimeBefore") + ok.Double(smithytime.FormatEpochSeconds(*v.CreationTimeBefore)) + } + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.ModifiedTimeAfter != nil { + ok := object.Key("ModifiedTimeAfter") + ok.Double(smithytime.FormatEpochSeconds(*v.ModifiedTimeAfter)) + } + + if v.ModifiedTimeBefore != nil { + ok := object.Key("ModifiedTimeBefore") + ok.Double(smithytime.FormatEpochSeconds(*v.ModifiedTimeBefore)) + } + + if v.NameContains != nil { + ok := object.Key("NameContains") + ok.String(*v.NameContains) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + if len(v.SortBy) > 0 { + ok := object.Key("SortBy") + ok.String(string(v.SortBy)) + } + + if len(v.SortOrder) > 0 { + ok := object.Key("SortOrder") + ok.String(string(v.SortOrder)) + } + + return nil +} + func awsAwsjson11_serializeOpDocumentListSubscribedWorkteamsInput(v *ListSubscribedWorkteamsInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -23055,6 +23430,23 @@ func awsAwsjson11_serializeOpDocumentRenderUiTemplateInput(v *RenderUiTemplateIn return nil } +func awsAwsjson11_serializeOpDocumentRetryPipelineExecutionInput(v *RetryPipelineExecutionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientRequestToken != nil { + ok := object.Key("ClientRequestToken") + ok.String(*v.ClientRequestToken) + } + + if v.PipelineExecutionArn != nil { + ok := object.Key("PipelineExecutionArn") + ok.String(*v.PipelineExecutionArn) + } + + return nil +} + func awsAwsjson11_serializeOpDocumentSearchInput(v *SearchInput, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/service/sagemaker/types/enums.go b/service/sagemaker/types/enums.go index 99e5498820f..c65d9712adc 100644 --- a/service/sagemaker/types/enums.go +++ b/service/sagemaker/types/enums.go @@ 
-3095,6 +3095,7 @@ const ( ResourceTypePipeline ResourceType = "Pipeline" ResourceTypePipelineExecution ResourceType = "PipelineExecution" ResourceTypeFeatureGroup ResourceType = "FeatureGroup" + ResourceTypeProject ResourceType = "Project" ) // Values returns all known values for ResourceType. Note that this can be expanded @@ -3112,6 +3113,7 @@ func (ResourceType) Values() []ResourceType { "Pipeline", "PipelineExecution", "FeatureGroup", + "Project", } } @@ -3572,6 +3574,44 @@ func (StepStatus) Values() []StepStatus { } } +type StudioLifecycleConfigAppType string + +// Enum values for StudioLifecycleConfigAppType +const ( + StudioLifecycleConfigAppTypeJupyterServer StudioLifecycleConfigAppType = "JupyterServer" + StudioLifecycleConfigAppTypeKernelGateway StudioLifecycleConfigAppType = "KernelGateway" +) + +// Values returns all known values for StudioLifecycleConfigAppType. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (StudioLifecycleConfigAppType) Values() []StudioLifecycleConfigAppType { + return []StudioLifecycleConfigAppType{ + "JupyterServer", + "KernelGateway", + } +} + +type StudioLifecycleConfigSortKey string + +// Enum values for StudioLifecycleConfigSortKey +const ( + StudioLifecycleConfigSortKeyCreationTime StudioLifecycleConfigSortKey = "CreationTime" + StudioLifecycleConfigSortKeyLastModifiedTime StudioLifecycleConfigSortKey = "LastModifiedTime" + StudioLifecycleConfigSortKeyName StudioLifecycleConfigSortKey = "Name" +) + +// Values returns all known values for StudioLifecycleConfigSortKey. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. +func (StudioLifecycleConfigSortKey) Values() []StudioLifecycleConfigSortKey { + return []StudioLifecycleConfigSortKey{ + "CreationTime", + "LastModifiedTime", + "Name", + } +} + type TargetDevice string // Enum values for TargetDevice @@ -3606,6 +3646,7 @@ const ( TargetDeviceX86Win64 TargetDevice = "x86_win64" TargetDeviceCoreml TargetDevice = "coreml" TargetDeviceJacintoTda4vm TargetDevice = "jacinto_tda4vm" + TargetDeviceImx8mplus TargetDevice = "imx8mplus" ) // Values returns all known values for TargetDevice. Note that this can be expanded @@ -3643,6 +3684,7 @@ func (TargetDevice) Values() []TargetDevice { "x86_win64", "coreml", "jacinto_tda4vm", + "imx8mplus", } } diff --git a/service/sagemaker/types/types.go b/service/sagemaker/types/types.go index a6344d261cc..630fa31969f 100644 --- a/service/sagemaker/types/types.go +++ b/service/sagemaker/types/types.go @@ -1308,8 +1308,8 @@ type AssociationSummary struct { // The type of the association. AssociationType AssociationEdgeType - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. CreatedBy *UserContext // When the association was created. @@ -3256,8 +3256,7 @@ type EndpointSummary struct { // The properties of an experiment as returned by the Search API. type Experiment struct { - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Who created the experiment. CreatedBy *UserContext // When the experiment was created. @@ -3276,8 +3275,8 @@ type Experiment struct { // The name of the experiment. 
ExperimentName *string - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. LastModifiedBy *UserContext // When the experiment was last modified. @@ -5930,6 +5929,10 @@ type JupyterServerAppSettings struct { // SageMaker image used by the JupyterServer app. DefaultResourceSpec *ResourceSpec + // The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the + // JupyterServerApp. + LifecycleConfigArns []string + noSmithyDocumentSerde } @@ -5944,6 +5947,10 @@ type KernelGatewayAppSettings struct { // SageMaker image used by the KernelGateway app. DefaultResourceSpec *ResourceSpec + // The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the + // the user profile or domain. + LifecycleConfigArns []string + noSmithyDocumentSerde } @@ -6190,10 +6197,10 @@ type LabelingJobResourceConfig struct { // ML compute instance(s) that run the training and inference jobs used for // automated data labeling. You can only specify a VolumeKmsKeyId when you create a // labeling job with automated data labeling enabled using the API operation - // CreateLabelingJob. You cannot specify an Amazon Web Services KMS customer - // managed CMK to encrypt the storage volume used for automated data labeling model - // training and inference when you create a labeling job using the console. To - // learn more, see Output Data and Storage Volume Encryption + // CreateLabelingJob. You cannot specify an Amazon Web Services KMS key to encrypt + // the storage volume used for automated data labeling model training and inference + // when you create a labeling job using the console. To learn more, see Output Data + // and Storage Volume Encryption // (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-security.html). The // VolumeKmsKeyId can be any of the following formats: // @@ -6648,8 +6655,8 @@ type ModelPackage struct { // (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-mkt-list.html). CertifyForMarketplace bool - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. CreatedBy *UserContext // The time that the model package was created. @@ -6658,8 +6665,8 @@ type ModelPackage struct { // Defines how to perform inference generation after a training job is run. InferenceSpecification *InferenceSpecification - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. LastModifiedBy *UserContext // The last time the model package was modified. @@ -6774,8 +6781,8 @@ type ModelPackageContainerDefinition struct { // A group of versioned models in the model registry. type ModelPackageGroup struct { - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. CreatedBy *UserContext // The time that the model group was created. 
@@ -8126,8 +8133,8 @@ type OutputDataConfig struct { // Key Alias "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias" // // If you use a - // KMS key ID or an alias of your master key, the Amazon SageMaker execution role - // must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, + // KMS key ID or an alias of your KMS key, the Amazon SageMaker execution role must + // include permissions to call kms:Encrypt. If you don't provide a KMS key ID, // Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. // Amazon SageMaker uses server-side encryption with KMS-managed keys for // OutputDataConfig. If you use a bucket policy with an s3:PutObject permission @@ -8251,15 +8258,15 @@ type ParentHyperParameterTuningJob struct { // A SageMaker Model Building Pipeline instance. type Pipeline struct { - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. CreatedBy *UserContext // The creation time of the pipeline. CreationTime *time.Time - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. LastModifiedBy *UserContext // The time that the pipeline was last modified. @@ -8295,8 +8302,8 @@ type Pipeline struct { // An execution of a pipeline. type PipelineExecution struct { - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. CreatedBy *UserContext // The creation time of the pipeline execution. @@ -8305,8 +8312,8 @@ type PipelineExecution struct { // If the execution failed, a message describing why. FailureReason *string - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. LastModifiedBy *UserContext // The time that the pipeline execution was last modified. @@ -8906,8 +8913,8 @@ type ProductionVariantCoreDumpConfig struct { // Key Alias "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias" // // If you use a - // KMS key ID or an alias of your master key, the Amazon SageMaker execution role - // must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, + // KMS key ID or an alias of your KMS key, the Amazon SageMaker execution role must + // include permissions to call kms:Encrypt. If you don't provide a KMS key ID, // Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. // Amazon SageMaker uses server-side encryption with KMS-managed keys for // OutputDataConfig. If you use a bucket policy with an s3:PutObject permission @@ -9067,6 +9074,49 @@ type ProfilerRuleEvaluationStatus struct { noSmithyDocumentSerde } +// The properties of a project as returned by the Search API. +type Project struct { + + // Who created the project. + CreatedBy *UserContext + + // A timestamp specifying when the project was created. + CreationTime *time.Time + + // The Amazon Resource Name (ARN) of the project. + ProjectArn *string + + // The description of the project. + ProjectDescription *string + + // The ID of the project. + ProjectId *string + + // The name of the project. 
+ ProjectName *string + + // The status of the project. + ProjectStatus ProjectStatus + + // Details of a provisioned service catalog product. For information about service + // catalog, see What is Amazon Web Services Service Catalog + // (https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html). + ServiceCatalogProvisionedProductDetails *ServiceCatalogProvisionedProductDetails + + // Details that you specify to provision a service catalog product. For information + // about service catalog, see What is Amazon Web Services Service Catalog + // (https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html). + ServiceCatalogProvisioningDetails *ServiceCatalogProvisioningDetails + + // An array of key-value pairs. You can use tags to categorize your Amazon Web + // Services resources in different ways, for example, by purpose, owner, or + // environment. For more information, see Tagging Amazon Web Services Resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html). + Tags []Tag + + noSmithyDocumentSerde +} + // Information about a project. type ProjectSummary struct { @@ -9569,6 +9619,10 @@ type ResourceSpec struct { // The instance type that the image version runs on. InstanceType AppInstanceType + // The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the + // Resource. + LifecycleConfigArn *string + // The ARN of the SageMaker image that the image version belongs to. SageMakerImageArn *string @@ -9802,6 +9856,9 @@ type SearchRecord struct { // An execution of a pipeline. PipelineExecution *PipelineExecution + // The properties of a project. + Project *Project + // The properties of a training job. TrainingJob *TrainingJob @@ -9968,7 +10025,7 @@ type ServiceCatalogProvisionedProductDetails struct { } // Details that you specify to provision a service catalog product. For information -// about service catalog, see .What is Amazon Web Services Service Catalog +// about service catalog, see What is Amazon Web Services Service Catalog // (https://docs.aws.amazon.com/servicecatalog/latest/adminguide/introduction.html). type ServiceCatalogProvisioningDetails struct { @@ -10126,6 +10183,28 @@ type StoppingCondition struct { noSmithyDocumentSerde } +// Details of the Studio Lifecycle Configuration. +type StudioLifecycleConfigDetails struct { + + // The creation time of the Studio Lifecycle Configuration. + CreationTime *time.Time + + // This value is equivalent to CreationTime because Studio Lifecycle Configurations + // are immutable. + LastModifiedTime *time.Time + + // The App type to which the Lifecycle Configuration is attached. + StudioLifecycleConfigAppType StudioLifecycleConfigAppType + + // The Amazon Resource Name (ARN) of the Lifecycle Configuration. + StudioLifecycleConfigArn *string + + // The name of the Studio Lifecycle Configuration. + StudioLifecycleConfigName *string + + noSmithyDocumentSerde +} + // Describes a work team of a vendor that does the a labelling job. type SubscribedWorkteam struct { @@ -11098,8 +11177,7 @@ type TransformS3DataSource struct { // The properties of a trial as returned by the Search API. type Trial struct { - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Who created the trial. CreatedBy *UserContext // When the trial was created. @@ -11112,8 +11190,8 @@ type Trial struct { // The name of the experiment the trial is part of. 
ExperimentName *string - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. LastModifiedBy *UserContext // Who last modified the trial. @@ -11145,8 +11223,7 @@ type Trial struct { // The properties of a trial component as returned by the Search API. type TrialComponent struct { - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Who created the trial component. CreatedBy *UserContext // When the component was created. @@ -11162,8 +11239,8 @@ type TrialComponent struct { // The input artifacts of the component. InputArtifacts map[string]TrialComponentArtifact - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. LastModifiedBy *UserContext // When the component was last modified. @@ -11298,8 +11375,8 @@ func (*TrialComponentParameterValueMemberNumberValue) isTrialComponentParameterV // A short summary of a trial component. type TrialComponentSimpleSummary struct { - // Information about the user who created or modified an experiment, trial, or - // trial component. + // Information about the user who created or modified an experiment, trial, trial + // component, or project. CreatedBy *UserContext // When the component was created. @@ -11366,7 +11443,7 @@ type TrialComponentStatus struct { // call the DescribeTrialComponent API and provide the TrialComponentName. type TrialComponentSummary struct { - // Who created the component. + // Who created the trial component. CreatedBy *UserContext // When the component was created. @@ -11571,8 +11648,8 @@ type USD struct { noSmithyDocumentSerde } -// Information about the user who created or modified an experiment, trial, or -// trial component. +// Information about the user who created or modified an experiment, trial, trial +// component, or project. type UserContext struct { // The domain associated with the user. 
diff --git a/service/sagemaker/validators.go b/service/sagemaker/validators.go index a3c5a61137a..87249d5245a 100644 --- a/service/sagemaker/validators.go +++ b/service/sagemaker/validators.go @@ -810,6 +810,26 @@ func (m *validateOpCreateProject) HandleInitialize(ctx context.Context, in middl return next.HandleInitialize(ctx, in) } +type validateOpCreateStudioLifecycleConfig struct { +} + +func (*validateOpCreateStudioLifecycleConfig) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateStudioLifecycleConfig) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateStudioLifecycleConfigInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateStudioLifecycleConfigInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpCreateTrainingJob struct { } @@ -1570,6 +1590,26 @@ func (m *validateOpDeleteProject) HandleInitialize(ctx context.Context, in middl return next.HandleInitialize(ctx, in) } +type validateOpDeleteStudioLifecycleConfig struct { +} + +func (*validateOpDeleteStudioLifecycleConfig) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteStudioLifecycleConfig) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteStudioLifecycleConfigInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteStudioLifecycleConfigInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDeleteTags struct { } @@ -2470,6 +2510,26 @@ func (m *validateOpDescribeProject) HandleInitialize(ctx context.Context, in mid return next.HandleInitialize(ctx, in) } +type validateOpDescribeStudioLifecycleConfig struct { +} + +func (*validateOpDescribeStudioLifecycleConfig) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeStudioLifecycleConfig) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeStudioLifecycleConfigInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeStudioLifecycleConfigInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDescribeSubscribedWorkteam struct { } @@ -2910,6 +2970,26 @@ func (m *validateOpRenderUiTemplate) HandleInitialize(ctx context.Context, in mi return next.HandleInitialize(ctx, in) } +type validateOpRetryPipelineExecution struct { +} + +func (*validateOpRetryPipelineExecution) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRetryPipelineExecution) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RetryPipelineExecutionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown 
input parameters type %T", in.Parameters) + } + if err := validateOpRetryPipelineExecutionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpSearch struct { } @@ -3890,6 +3970,10 @@ func addOpCreateProjectValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateProject{}, middleware.After) } +func addOpCreateStudioLifecycleConfigValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateStudioLifecycleConfig{}, middleware.After) +} + func addOpCreateTrainingJobValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateTrainingJob{}, middleware.After) } @@ -4042,6 +4126,10 @@ func addOpDeleteProjectValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteProject{}, middleware.After) } +func addOpDeleteStudioLifecycleConfigValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteStudioLifecycleConfig{}, middleware.After) +} + func addOpDeleteTagsValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteTags{}, middleware.After) } @@ -4222,6 +4310,10 @@ func addOpDescribeProjectValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDescribeProject{}, middleware.After) } +func addOpDescribeStudioLifecycleConfigValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeStudioLifecycleConfig{}, middleware.After) +} + func addOpDescribeSubscribedWorkteamValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDescribeSubscribedWorkteam{}, middleware.After) } @@ -4310,6 +4402,10 @@ func addOpRenderUiTemplateValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpRenderUiTemplate{}, middleware.After) } +func addOpRetryPipelineExecutionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRetryPipelineExecution{}, middleware.After) +} + func addOpSearchValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpSearch{}, middleware.After) } @@ -9680,6 +9776,32 @@ func validateOpCreateProjectInput(v *CreateProjectInput) error { } } +func validateOpCreateStudioLifecycleConfigInput(v *CreateStudioLifecycleConfigInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateStudioLifecycleConfigInput"} + if v.StudioLifecycleConfigName == nil { + invalidParams.Add(smithy.NewErrParamRequired("StudioLifecycleConfigName")) + } + if v.StudioLifecycleConfigContent == nil { + invalidParams.Add(smithy.NewErrParamRequired("StudioLifecycleConfigContent")) + } + if len(v.StudioLifecycleConfigAppType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("StudioLifecycleConfigAppType")) + } + if v.Tags != nil { + if err := validateTagList(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpCreateTrainingJobInput(v *CreateTrainingJobInput) error { if v == nil { return nil @@ -10447,6 +10569,21 @@ func validateOpDeleteProjectInput(v *DeleteProjectInput) error { } } +func validateOpDeleteStudioLifecycleConfigInput(v *DeleteStudioLifecycleConfigInput) error { + if v == nil { + return nil + } 
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteStudioLifecycleConfigInput"} + if v.StudioLifecycleConfigName == nil { + invalidParams.Add(smithy.NewErrParamRequired("StudioLifecycleConfigName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDeleteTagsInput(v *DeleteTagsInput) error { if v == nil { return nil @@ -11143,6 +11280,21 @@ func validateOpDescribeProjectInput(v *DescribeProjectInput) error { } } +func validateOpDescribeStudioLifecycleConfigInput(v *DescribeStudioLifecycleConfigInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeStudioLifecycleConfigInput"} + if v.StudioLifecycleConfigName == nil { + invalidParams.Add(smithy.NewErrParamRequired("StudioLifecycleConfigName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDescribeSubscribedWorkteamInput(v *DescribeSubscribedWorkteamInput) error { if v == nil { return nil @@ -11511,6 +11663,24 @@ func validateOpRenderUiTemplateInput(v *RenderUiTemplateInput) error { } } +func validateOpRetryPipelineExecutionInput(v *RetryPipelineExecutionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RetryPipelineExecutionInput"} + if v.PipelineExecutionArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("PipelineExecutionArn")) + } + if v.ClientRequestToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientRequestToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpSearchInput(v *SearchInput) error { if v == nil { return nil diff --git a/service/ssooidc/internal/endpoints/endpoints.go b/service/ssooidc/internal/endpoints/endpoints.go index 197383d62f3..eef58dd600c 100644 --- a/service/ssooidc/internal/endpoints/endpoints.go +++ b/service/ssooidc/internal/endpoints/endpoints.go @@ -188,5 +188,13 @@ var defaultPartitions = endpoints.Partitions{ }, RegionRegex: partitionRegexp.AwsUsGov, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + "us-gov-west-1": endpoints.Endpoint{ + Hostname: "oidc.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + }, + }, }, } diff --git a/service/transcribe/api_op_StartMedicalTranscriptionJob.go b/service/transcribe/api_op_StartMedicalTranscriptionJob.go index 0fd31765f85..76b76ce729a 100644 --- a/service/transcribe/api_op_StartMedicalTranscriptionJob.go +++ b/service/transcribe/api_op_StartMedicalTranscriptionJob.go @@ -85,6 +85,10 @@ type StartMedicalTranscriptionJobInput struct { // output. ContentIdentificationType types.MedicalContentIdentificationType + // A map of plain text, non-secret key:value pairs, known as encryption context + // pairs, that provide an added layer of security for your data. + KMSEncryptionContext map[string]string + // The audio format of the input media file. MediaFormat types.MediaFormat diff --git a/service/transcribe/api_op_StartTranscriptionJob.go b/service/transcribe/api_op_StartTranscriptionJob.go index 173696aa9d5..ce5d2aed454 100644 --- a/service/transcribe/api_op_StartTranscriptionJob.go +++ b/service/transcribe/api_op_StartTranscriptionJob.go @@ -55,6 +55,10 @@ type StartTranscriptionJobInput struct { // limit is reached and there are no slots available to immediately run the job. 
JobExecutionSettings *types.JobExecutionSettings + // A map of plain text, non-secret key:value pairs, known as encryption context + // pairs, that provide an added layer of security for your data. + KMSEncryptionContext map[string]string + // The language code for the language used in the input media file. To transcribe // speech in Modern Standard Arabic (ar-SA), your audio or video file must be // encoded at a sample rate of 16,000 Hz or higher. @@ -146,6 +150,9 @@ type StartTranscriptionJobInput struct { // A Settings object that provides optional settings for a transcription job. Settings *types.Settings + // Add subtitles to your batch transcription job. + Subtitles *types.Subtitles + // Add tags to an Amazon Transcribe transcription job. Tags []types.Tag diff --git a/service/transcribe/deserializers.go b/service/transcribe/deserializers.go index fe2e2fb227f..6cf649fbe99 100644 --- a/service/transcribe/deserializers.go +++ b/service/transcribe/deserializers.go @@ -7136,6 +7136,119 @@ func awsAwsjson11_deserializeDocumentStringTargetList(v *[]string, value interfa return nil } +func awsAwsjson11_deserializeDocumentSubtitleFileUris(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + if *v == nil { + cv = []string{} + } else { + cv = *v + } + + for _, value := range shape { + var col string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Uri to be of type string, got %T instead", value) + } + col = jtv + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentSubtitleFormats(v *[]types.SubtitleFormat, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.SubtitleFormat + if *v == nil { + cv = []types.SubtitleFormat{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.SubtitleFormat + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SubtitleFormat to be of type string, got %T instead", value) + } + col = types.SubtitleFormat(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentSubtitlesOutput(v **types.SubtitlesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SubtitlesOutput + if *v == nil { + sv = &types.SubtitlesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Formats": + if err := awsAwsjson11_deserializeDocumentSubtitleFormats(&sv.Formats, value); err != nil { + return err + } + + case "SubtitleFileUris": + if err := awsAwsjson11_deserializeDocumentSubtitleFileUris(&sv.SubtitleFileUris, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson11_deserializeDocumentTag(v **types.Tag, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -7524,6 +7637,11 @@ func 
awsAwsjson11_deserializeDocumentTranscriptionJob(v **types.TranscriptionJob } } + case "Subtitles": + if err := awsAwsjson11_deserializeDocumentSubtitlesOutput(&sv.Subtitles, value); err != nil { + return err + } + case "Tags": if err := awsAwsjson11_deserializeDocumentTagList(&sv.Tags, value); err != nil { return err diff --git a/service/transcribe/serializers.go b/service/transcribe/serializers.go index eee553c9e7d..49d2eea38cc 100644 --- a/service/transcribe/serializers.go +++ b/service/transcribe/serializers.go @@ -2036,6 +2036,17 @@ func awsAwsjson11_serializeDocumentJobExecutionSettings(v *types.JobExecutionSet return nil } +func awsAwsjson11_serializeDocumentKMSEncryptionContextMap(v map[string]string, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + om.String(v[key]) + } + return nil +} + func awsAwsjson11_serializeDocumentLanguageOptions(v []types.LanguageCode, value smithyjson.Value) error { array := value.Array() defer array.Close() @@ -2341,6 +2352,31 @@ func awsAwsjson11_serializeDocumentStringTargetList(v []string, value smithyjson return nil } +func awsAwsjson11_serializeDocumentSubtitleFormats(v []types.SubtitleFormat, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + +func awsAwsjson11_serializeDocumentSubtitles(v *types.Subtitles, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Formats != nil { + ok := object.Key("Formats") + if err := awsAwsjson11_serializeDocumentSubtitleFormats(v.Formats, ok); err != nil { + return err + } + } + + return nil +} + func awsAwsjson11_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -3054,6 +3090,13 @@ func awsAwsjson11_serializeOpDocumentStartMedicalTranscriptionJobInput(v *StartM ok.String(string(v.ContentIdentificationType)) } + if v.KMSEncryptionContext != nil { + ok := object.Key("KMSEncryptionContext") + if err := awsAwsjson11_serializeDocumentKMSEncryptionContextMap(v.KMSEncryptionContext, ok); err != nil { + return err + } + } + if len(v.LanguageCode) > 0 { ok := object.Key("LanguageCode") ok.String(string(v.LanguageCode)) @@ -3146,6 +3189,13 @@ func awsAwsjson11_serializeOpDocumentStartTranscriptionJobInput(v *StartTranscri } } + if v.KMSEncryptionContext != nil { + ok := object.Key("KMSEncryptionContext") + if err := awsAwsjson11_serializeDocumentKMSEncryptionContextMap(v.KMSEncryptionContext, ok); err != nil { + return err + } + } + if len(v.LanguageCode) > 0 { ok := object.Key("LanguageCode") ok.String(string(v.LanguageCode)) @@ -3204,6 +3254,13 @@ func awsAwsjson11_serializeOpDocumentStartTranscriptionJobInput(v *StartTranscri } } + if v.Subtitles != nil { + ok := object.Key("Subtitles") + if err := awsAwsjson11_serializeDocumentSubtitles(v.Subtitles, ok); err != nil { + return err + } + } + if v.Tags != nil { ok := object.Key("Tags") if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { diff --git a/service/transcribe/types/enums.go b/service/transcribe/types/enums.go index 41b03a47cd6..1699aaffdc9 100644 --- a/service/transcribe/types/enums.go +++ b/service/transcribe/types/enums.go @@ -333,6 +333,24 @@ func (Specialty) Values() []Specialty { } } +type SubtitleFormat string + +// Enum values for SubtitleFormat +const ( + SubtitleFormatVtt SubtitleFormat = "vtt" + SubtitleFormatSrt SubtitleFormat 
= "srt" +) + +// Values returns all known values for SubtitleFormat. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (SubtitleFormat) Values() []SubtitleFormat { + return []SubtitleFormat{ + "vtt", + "srt", + } +} + type TranscriptFilterType string // Enum values for TranscriptFilterType diff --git a/service/transcribe/types/types.go b/service/transcribe/types/types.go index 96bb67d0c14..5bc1db31737 100644 --- a/service/transcribe/types/types.go +++ b/service/transcribe/types/types.go @@ -327,8 +327,9 @@ type JobExecutionSettings struct { // concurrent execution limit is exceeded. When the AllowDeferredExecution field is // true, jobs are queued and executed when the number of executing jobs falls below // the concurrent execution limit. If the field is false, Amazon Transcribe returns - // a LimitExceededException exception. If you specify the AllowDeferredExecution - // field, you must specify the DataAccessRoleArn field. + // a LimitExceededException exception. Note that job queuing is enabled by default + // for call analytics jobs. If you specify the AllowDeferredExecution field, you + // must specify the DataAccessRoleArn field. AllowDeferredExecution *bool // The Amazon Resource Name (ARN) of a role that has access to the S3 bucket that @@ -826,6 +827,29 @@ type Settings struct { noSmithyDocumentSerde } +// Generate subtitles for your batch transcription job. +type Subtitles struct { + + // Specify the output format for your subtitle file. + Formats []SubtitleFormat + + noSmithyDocumentSerde +} + +// Specify the output format for your subtitle file. +type SubtitlesOutput struct { + + // Specify the output format for your subtitle file; if you select both SRT and VTT + // formats, two output files are genereated. + Formats []SubtitleFormat + + // Choose the output location for your subtitle file. This location must be an S3 + // bucket. + SubtitleFileUris []string + + noSmithyDocumentSerde +} + // A key:value pair that adds metadata to a resource used by Amazon Transcribe. For // example, a tag with the key:value pair ‘Department’:’Sales’ might be added to a // resource to indicate its use by your organization's sales department. @@ -993,6 +1017,9 @@ type TranscriptionJob struct { // A timestamp that shows when the job started processing. StartTime *time.Time + // Generate subtitles for your batch transcription job. + Subtitles *SubtitlesOutput + // A key:value pair assigned to a given transcription job. Tags []Tag diff --git a/service/wafv2/api_op_GetManagedRuleSet.go b/service/wafv2/api_op_GetManagedRuleSet.go index 5cd78709f32..08c3f3f29ad 100644 --- a/service/wafv2/api_op_GetManagedRuleSet.go +++ b/service/wafv2/api_op_GetManagedRuleSet.go @@ -12,11 +12,11 @@ import ( ) // Retrieves the specified managed rule set. This is intended for use only by -// vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace -// sellers. Vendors, you can use the managed rule set APIs to provide controlled -// rollout of your versioned managed rule group offerings for your customers. The -// APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and -// UpdateManagedRuleSetVersionExpiryDate. +// vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web +// Services Marketplace sellers. 
Vendors, you can use the managed rule set APIs to +// provide controlled rollout of your versioned managed rule group offerings for +// your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, +// PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate. func (c *Client) GetManagedRuleSet(ctx context.Context, params *GetManagedRuleSetInput, optFns ...func(*Options)) (*GetManagedRuleSetOutput, error) { if params == nil { params = &GetManagedRuleSetInput{} diff --git a/service/wafv2/api_op_GetRateBasedStatementManagedKeys.go b/service/wafv2/api_op_GetRateBasedStatementManagedKeys.go index 4d6237a9498..8de7735bc5d 100644 --- a/service/wafv2/api_op_GetRateBasedStatementManagedKeys.go +++ b/service/wafv2/api_op_GetRateBasedStatementManagedKeys.go @@ -11,10 +11,19 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Retrieves the keys that are currently blocked by a rate-based rule. The maximum -// number of managed keys that can be blocked for a single rate-based rule is -// 10,000. If more than 10,000 addresses exceed the rate limit, those with the -// highest rates are blocked. +// Retrieves the keys that are currently blocked by a rate-based rule instance. The +// maximum number of managed keys that can be blocked for a single rate-based rule +// instance is 10,000. If more than 10,000 addresses exceed the rate limit, those +// with the highest rates are blocked. For a rate-based rule that you've defined +// inside a rule group, provide the name of the rule group reference statement in +// your request, in addition to the rate-based rule name and the web ACL name. WAF +// monitors web requests and manages keys independently for each unique combination +// of web ACL, optional rule group, and rate-based rule. For example, if you define +// a rate-based rule inside a rule group, and then use the rule group in a web ACL, +// WAF monitors web requests and manages keys for that web ACL, rule group +// reference statement, and rate-based rule instance. If you use the same rule +// group in a second web ACL, WAF monitors web requests and manages keys for this +// second usage completely independent of your first. func (c *Client) GetRateBasedStatementManagedKeys(ctx context.Context, params *GetRateBasedStatementManagedKeysInput, optFns ...func(*Options)) (*GetRateBasedStatementManagedKeysOutput, error) { if params == nil { params = &GetRateBasedStatementManagedKeysInput{} @@ -32,7 +41,10 @@ func (c *Client) GetRateBasedStatementManagedKeys(ctx context.Context, params *G type GetRateBasedStatementManagedKeysInput struct { - // The name of the rate-based rule to get the keys for. + // The name of the rate-based rule to get the keys for. If you have the rule + // defined inside a rule group that you're using in your web ACL, also provide the + // name of the rule group reference statement in the request parameter + // RuleGroupRuleName. // // This member is required. RuleName *string @@ -64,6 +76,10 @@ type GetRateBasedStatementManagedKeysInput struct { // This member is required. WebACLName *string + // The name of the rule group reference statement in your web ACL. This is required + // only when you have the rate-based rule nested inside a rule group. 
+ RuleGroupRuleName *string + noSmithyDocumentSerde } diff --git a/service/wafv2/api_op_ListAvailableManagedRuleGroups.go b/service/wafv2/api_op_ListAvailableManagedRuleGroups.go index 606ff3dc371..0bb4ce8dee5 100644 --- a/service/wafv2/api_op_ListAvailableManagedRuleGroups.go +++ b/service/wafv2/api_op_ListAvailableManagedRuleGroups.go @@ -13,7 +13,8 @@ import ( // Retrieves an array of managed rule groups that are available for you to use. // This list includes all Amazon Web Services Managed Rules rule groups and all of -// the Marketplace managed rule groups that you're subscribed to. +// the Amazon Web Services Marketplace managed rule groups that you're subscribed +// to. func (c *Client) ListAvailableManagedRuleGroups(ctx context.Context, params *ListAvailableManagedRuleGroupsInput, optFns ...func(*Options)) (*ListAvailableManagedRuleGroupsOutput, error) { if params == nil { params = &ListAvailableManagedRuleGroupsInput{} diff --git a/service/wafv2/api_op_ListManagedRuleSets.go b/service/wafv2/api_op_ListManagedRuleSets.go index b066002f237..baf0aec6f0a 100644 --- a/service/wafv2/api_op_ListManagedRuleSets.go +++ b/service/wafv2/api_op_ListManagedRuleSets.go @@ -12,11 +12,11 @@ import ( ) // Retrieves the managed rule sets that you own. This is intended for use only by -// vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace -// sellers. Vendors, you can use the managed rule set APIs to provide controlled -// rollout of your versioned managed rule group offerings for your customers. The -// APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and -// UpdateManagedRuleSetVersionExpiryDate. +// vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web +// Services Marketplace sellers. Vendors, you can use the managed rule set APIs to +// provide controlled rollout of your versioned managed rule group offerings for +// your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, +// PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate. func (c *Client) ListManagedRuleSets(ctx context.Context, params *ListManagedRuleSetsInput, optFns ...func(*Options)) (*ListManagedRuleSetsOutput, error) { if params == nil { params = &ListManagedRuleSetsInput{} diff --git a/service/wafv2/api_op_PutManagedRuleSetVersions.go b/service/wafv2/api_op_PutManagedRuleSetVersions.go index 37a93895c83..6b670490f77 100644 --- a/service/wafv2/api_op_PutManagedRuleSetVersions.go +++ b/service/wafv2/api_op_PutManagedRuleSetVersions.go @@ -14,10 +14,10 @@ import ( // Defines the versions of your managed rule set that you are offering to the // customers. Customers see your offerings as managed rule groups with versioning. // This is intended for use only by vendors of managed rule sets. Vendors are -// Amazon Web Services and Marketplace sellers. Vendors, you can use the managed -// rule set APIs to provide controlled rollout of your versioned managed rule group -// offerings for your customers. The APIs are ListManagedRuleSets, -// GetManagedRuleSet, PutManagedRuleSetVersions, and +// Amazon Web Services and Amazon Web Services Marketplace sellers. Vendors, you +// can use the managed rule set APIs to provide controlled rollout of your +// versioned managed rule group offerings for your customers. The APIs are +// ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and // UpdateManagedRuleSetVersionExpiryDate. Customers retrieve their managed rule // group list by calling ListAvailableManagedRuleGroups. 
The name that you provide // here for your managed rule set is the name the customer sees for the diff --git a/service/wafv2/api_op_UpdateManagedRuleSetVersionExpiryDate.go b/service/wafv2/api_op_UpdateManagedRuleSetVersionExpiryDate.go index 4476221b1b9..82dba02da9d 100644 --- a/service/wafv2/api_op_UpdateManagedRuleSetVersionExpiryDate.go +++ b/service/wafv2/api_op_UpdateManagedRuleSetVersionExpiryDate.go @@ -17,10 +17,11 @@ import ( // expiration for a version, WAF excludes it from the reponse to // ListAvailableManagedRuleGroupVersions for the managed rule group. This is // intended for use only by vendors of managed rule sets. Vendors are Amazon Web -// Services and Marketplace sellers. Vendors, you can use the managed rule set APIs -// to provide controlled rollout of your versioned managed rule group offerings for -// your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, -// PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate. +// Services and Amazon Web Services Marketplace sellers. Vendors, you can use the +// managed rule set APIs to provide controlled rollout of your versioned managed +// rule group offerings for your customers. The APIs are ListManagedRuleSets, +// GetManagedRuleSet, PutManagedRuleSetVersions, and +// UpdateManagedRuleSetVersionExpiryDate. func (c *Client) UpdateManagedRuleSetVersionExpiryDate(ctx context.Context, params *UpdateManagedRuleSetVersionExpiryDateInput, optFns ...func(*Options)) (*UpdateManagedRuleSetVersionExpiryDateOutput, error) { if params == nil { params = &UpdateManagedRuleSetVersionExpiryDateInput{} diff --git a/service/wafv2/serializers.go b/service/wafv2/serializers.go index ead2799c954..2cef02607ae 100644 --- a/service/wafv2/serializers.go +++ b/service/wafv2/serializers.go @@ -3968,6 +3968,11 @@ func awsAwsjson11_serializeOpDocumentGetRateBasedStatementManagedKeysInput(v *Ge object := value.Object() defer object.Close() + if v.RuleGroupRuleName != nil { + ok := object.Key("RuleGroupRuleName") + ok.String(*v.RuleGroupRuleName) + } + if v.RuleName != nil { ok := object.Key("RuleName") ok.String(*v.RuleName) diff --git a/service/wafv2/types/enums.go b/service/wafv2/types/enums.go index bef2f19e8f2..997b697e770 100644 --- a/service/wafv2/types/enums.go +++ b/service/wafv2/types/enums.go @@ -767,6 +767,9 @@ const ( ParameterExceptionFieldBodyParsingFallbackBehavior ParameterExceptionField = "BODY_PARSING_FALLBACK_BEHAVIOR" ParameterExceptionFieldLoggingFilter ParameterExceptionField = "LOGGING_FILTER" ParameterExceptionFieldFilterCondition ParameterExceptionField = "FILTER_CONDITION" + ParameterExceptionFieldExpireTimestamp ParameterExceptionField = "EXPIRE_TIMESTAMP" + ParameterExceptionFieldChangePropagationStatus ParameterExceptionField = "CHANGE_PROPAGATION_STATUS" + ParameterExceptionFieldAssociableResource ParameterExceptionField = "ASSOCIABLE_RESOURCE" ) // Values returns all known values for ParameterExceptionField. 
Note that this can @@ -827,6 +830,9 @@ func (ParameterExceptionField) Values() []ParameterExceptionField { "BODY_PARSING_FALLBACK_BEHAVIOR", "LOGGING_FILTER", "FILTER_CONDITION", + "EXPIRE_TIMESTAMP", + "CHANGE_PROPAGATION_STATUS", + "ASSOCIABLE_RESOURCE", } } diff --git a/service/wafv2/types/types.go b/service/wafv2/types/types.go index 966069c5a64..adc9663fc13 100644 --- a/service/wafv2/types/types.go +++ b/service/wafv2/types/types.go @@ -22,7 +22,8 @@ type ActionCondition struct { // Inspect all of the elements that WAF has parsed and extracted from the web // request JSON body that are within the JsonBodyMatchScope. This is used with the // FieldToMatch option JsonBody. This is used only to indicate the web request -// component for WAF to inspect, in the FieldToMatch specification. +// component for WAF to inspect, in the FieldToMatch specification. JSON +// specification: "All": {} type All struct { noSmithyDocumentSerde } @@ -43,7 +44,8 @@ type AllowAction struct { } // All query arguments of a web request. This is used only to indicate the web -// request component for WAF to inspect, in the FieldToMatch specification. +// request component for WAF to inspect, in the FieldToMatch specification. JSON +// specification: "AllQueryArguments": {} type AllQueryArguments struct { noSmithyDocumentSerde } @@ -79,7 +81,7 @@ type BlockAction struct { // The body of a web request. This immediately follows the request headers. This is // used only to indicate the web request component for WAF to inspect, in the -// FieldToMatch specification. +// FieldToMatch specification. JSON specification: "Body": {} type Body struct { noSmithyDocumentSerde } @@ -325,6 +327,9 @@ type ExcludedRule struct { // needed, according to the type. You specify a single request component in // FieldToMatch for each rule statement that requires it. To inspect more than one // component of a web request, create a separate rule statement for each component. +// JSON specification for a QueryString field to match: "FieldToMatch": { +// "QueryString": {} } Example JSON for a Method field to match specification: +// "FieldToMatch": { "Method": { "Name": "DELETE" } } type FieldToMatch struct { // Inspect all query arguments. @@ -469,8 +474,8 @@ type FirewallManagerStatement struct { // A rule statement used to run the rules that are defined in a RuleGroup. To use // this, create a rule group with your rules, then provide the ARN of the rule // group in this statement. You cannot nest a RuleGroupReferenceStatement, for - // example for use inside a NotStatement or OrStatement. It can only be referenced - // as a top-level statement within a rule. + // example for use inside a NotStatement or OrStatement. You can only use a rule + // group reference statement at the top level inside a web ACL. RuleGroupReferenceStatement *RuleGroupReferenceStatement noSmithyDocumentSerde @@ -760,7 +765,8 @@ type IPSetSummary struct { // request headers. This is used in the FieldToMatch specification. Use the // specifications in this object to indicate which parts of the JSON body to // inspect using the rule's inspection criteria. WAF inspects only the parts of the -// JSON that result from the matches that you indicate. +// JSON that result from the matches that you indicate. Example JSON: "JsonBody": { +// "MatchPattern": { "All": {} }, "MatchScope": "ALL" } type JsonBody struct { // The patterns to look for in the JSON body. 
WAF inspects the results of these @@ -941,8 +947,9 @@ type LoggingConfiguration struct { ManagedByFirewallManager bool // The parts of the request that you want to keep out of the logs. For example, if - // you redact the HEADER field, the HEADER field in the firehose will be xxx. You - // must use one of the following values: URI, QUERY_STRING, HEADER, or METHOD. + // you redact the SingleHeader field, the HEADER field in the firehose will be xxx. + // You can specify only the following fields for redaction: UriPath, QueryString, + // SingleHeader, Method, and JsonBody. RedactedFields []FieldToMatch noSmithyDocumentSerde @@ -1013,12 +1020,13 @@ type ManagedRuleGroupStatement struct { // ListAvailableManagedRuleGroups. This provides information like the name and // vendor name, that you provide when you add a ManagedRuleGroupStatement to a web // ACL. Managed rule groups include Amazon Web Services Managed Rules rule groups, -// which are free of charge to WAF customers, and Marketplace managed rule groups, -// which you can subscribe to through Marketplace. +// which are free of charge to WAF customers, and Amazon Web Services Marketplace +// managed rule groups, which you can subscribe to through Amazon Web Services +// Marketplace. type ManagedRuleGroupSummary struct { // The description of the managed rule group, provided by Amazon Web Services - // Managed Rules or the Marketplace seller who manages it. + // Managed Rules or the Amazon Web Services Marketplace seller who manages it. Description *string // The name of the managed rule group. You use this, along with the vendor name, to @@ -1045,13 +1053,14 @@ type ManagedRuleGroupVersion struct { noSmithyDocumentSerde } -// A set of rules that is managed by Amazon Web Services and Marketplace sellers to -// provide versioned managed rule groups for customers of WAF. This is intended for -// use only by vendors of managed rule sets. Vendors are Amazon Web Services and -// Marketplace sellers. Vendors, you can use the managed rule set APIs to provide -// controlled rollout of your versioned managed rule group offerings for your -// customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, -// PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate. +// A set of rules that is managed by Amazon Web Services and Amazon Web Services +// Marketplace sellers to provide versioned managed rule groups for customers of +// WAF. This is intended for use only by vendors of managed rule sets. Vendors are +// Amazon Web Services and Amazon Web Services Marketplace sellers. Vendors, you +// can use the managed rule set APIs to provide controlled rollout of your +// versioned managed rule group offerings for your customers. The APIs are +// ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and +// UpdateManagedRuleSetVersionExpiryDate. type ManagedRuleSet struct { // The Amazon Resource Name (ARN) of the entity. @@ -1100,11 +1109,11 @@ type ManagedRuleSet struct { } // High-level information for a managed rule set. This is intended for use only by -// vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace -// sellers. Vendors, you can use the managed rule set APIs to provide controlled -// rollout of your versioned managed rule group offerings for your customers. The -// APIs are ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and -// UpdateManagedRuleSetVersionExpiryDate. +// vendors of managed rule sets. 
Vendors are Amazon Web Services and Amazon Web +// Services Marketplace sellers. Vendors, you can use the managed rule set APIs to +// provide controlled rollout of your versioned managed rule group offerings for +// your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, +// PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate. type ManagedRuleSetSummary struct { // The Amazon Resource Name (ARN) of the entity. @@ -1150,10 +1159,10 @@ type ManagedRuleSetSummary struct { } // Information for a single version of a managed rule set. This is intended for use -// only by vendors of managed rule sets. Vendors are Amazon Web Services and -// Marketplace sellers. Vendors, you can use the managed rule set APIs to provide -// controlled rollout of your versioned managed rule group offerings for your -// customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, +// only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon +// Web Services Marketplace sellers. Vendors, you can use the managed rule set APIs +// to provide controlled rollout of your versioned managed rule group offerings for +// your customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, // PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate. type ManagedRuleSetVersion struct { @@ -1195,6 +1204,7 @@ type ManagedRuleSetVersion struct { // The HTTP method of a web request. The method indicates the type of operation // that the request is asking the origin to perform. This is used only to indicate // the web request component for WAF to inspect, in the FieldToMatch specification. +// JSON specification: "Method": {} type Method struct { noSmithyDocumentSerde } @@ -1202,7 +1212,7 @@ type Method struct { // Specifies that WAF should do nothing. This is generally used to try out a rule // without performing any actions. You set the OverrideAction on the Rule. This is // used in the context of other settings, for example to specify values for -// RuleAction and web ACL DefaultAction. +// RuleAction and web ACL DefaultAction. JSON specification: "None": {} type NoneAction struct { noSmithyDocumentSerde } @@ -1258,7 +1268,8 @@ type OverrideAction struct { // The query string of a web request. This is the part of a URL that appears after // a ? character, if any. This is used only to indicate the web request component -// for WAF to inspect, in the FieldToMatch specification. +// for WAF to inspect, in the FieldToMatch specification. JSON specification: +// "QueryString": {} type QueryString struct { noSmithyDocumentSerde } @@ -1267,28 +1278,35 @@ type QueryString struct { // and triggers the rule action when the rate exceeds a limit that you specify on // the number of requests in any 5-minute time span. You can use this to put a // temporary block on requests from an IP address that is sending excessive -// requests. When the rule action triggers, WAF blocks additional requests from the -// IP address until the request rate falls below the limit. You can optionally nest -// another statement inside the rate-based statement, to narrow the scope of the -// rule so that it only counts requests that match the nested statement. For -// example, based on recent requests that you have seen from an attacker, you might -// create a rate-based rule with a nested AND rule statement that contains the -// following nested statements: +// requests. WAF tracks and manages web requests separately for each instance of a +// rate-based rule that you use. 
For example, if you provide the same rate-based +// rule settings in two web ACLs, each of the two rule statements represents a +// separate instance of the rate-based rule and gets its own tracking and +// management by WAF. If you define a rate-based rule inside a rule group, and then +// use that rule group in multiple places, each use creates a separate instance of +// the rate-based rule that gets its own tracking and management by WAF. When the +// rule action triggers, WAF blocks additional requests from the IP address until +// the request rate falls below the limit. You can optionally nest another +// statement inside the rate-based statement, to narrow the scope of the rule so +// that it only counts requests that match the nested statement. For example, based +// on recent requests that you have seen from an attacker, you might create a +// rate-based rule with a nested AND rule statement that contains the following +// nested statements: // -// * An IP match statement with an IP set that -// specified the address 192.0.2.44. +// * An IP match statement with an IP set that specified the +// address 192.0.2.44. // -// * A string match statement that searches in -// the User-Agent header for the string BadBot. +// * A string match statement that searches in the User-Agent +// header for the string BadBot. // -// In this rate-based rule, you also -// define a rate limit. For this example, the rate limit is 1,000. Requests that -// meet both of the conditions in the statements are counted. If the count exceeds -// 1,000 requests per five minutes, the rule action triggers. Requests that do not -// meet both conditions are not counted towards the rate limit and are not affected -// by this rule. You cannot nest a RateBasedStatement, for example for use inside a -// NotStatement or OrStatement. It can only be referenced as a top-level statement -// within a rule. +// In this rate-based rule, you also define a rate +// limit. For this example, the rate limit is 1,000. Requests that meet both of the +// conditions in the statements are counted. If the count exceeds 1,000 requests +// per five minutes, the rule action triggers. Requests that do not meet both +// conditions are not counted towards the rate limit and are not affected by this +// rule. You cannot nest a RateBasedStatement inside another statement, for example +// inside a NotStatement or OrStatement. You can define a RateBasedStatement inside +// a web ACL and inside a rule group. type RateBasedStatement struct { // Setting that indicates how to aggregate the request counts. The options are the @@ -1329,7 +1347,7 @@ type RateBasedStatement struct { noSmithyDocumentSerde } -// The set of IP addresses that are currently blocked for a rate-based statement. +// The set of IP addresses that are currently blocked for a RateBasedStatement. type RateBasedStatementManagedKeysIPSet struct { // The IP addresses that are currently blocked. @@ -1638,8 +1656,8 @@ type RuleGroup struct { // A rule statement used to run the rules that are defined in a RuleGroup. To use // this, create a rule group with your rules, then provide the ARN of the rule // group in this statement. You cannot nest a RuleGroupReferenceStatement, for -// example for use inside a NotStatement or OrStatement. It can only be referenced -// as a top-level statement within a rule. +// example for use inside a NotStatement or OrStatement. You can only use a rule +// group reference statement at the top level inside a web ACL. 
type RuleGroupReferenceStatement struct { // The Amazon Resource Name (ARN) of the entity. @@ -1754,6 +1772,7 @@ type SampledHTTPRequest struct { // One of the headers in a web request, identified by name, for example, User-Agent // or Referer. This setting isn't case sensitive. This is used only to indicate the // web request component for WAF to inspect, in the FieldToMatch specification. +// Example JSON: "SingleHeader": { "Name": "haystack" } type SingleHeader struct { // The name of the query header to inspect. @@ -1766,6 +1785,7 @@ type SingleHeader struct { // One query argument in a web request, identified by name, for example UserName or // SalesRegion. The name can be up to 30 characters long and isn't case sensitive. +// Example JSON: "SingleQueryArgument": { "Name": "myArgument" } type SingleQueryArgument struct { // The name of the query argument to inspect. @@ -1902,28 +1922,35 @@ type Statement struct { // and triggers the rule action when the rate exceeds a limit that you specify on // the number of requests in any 5-minute time span. You can use this to put a // temporary block on requests from an IP address that is sending excessive - // requests. When the rule action triggers, WAF blocks additional requests from the - // IP address until the request rate falls below the limit. You can optionally nest - // another statement inside the rate-based statement, to narrow the scope of the - // rule so that it only counts requests that match the nested statement. For - // example, based on recent requests that you have seen from an attacker, you might - // create a rate-based rule with a nested AND rule statement that contains the - // following nested statements: - // - // * An IP match statement with an IP set that - // specified the address 192.0.2.44. - // - // * A string match statement that searches in - // the User-Agent header for the string BadBot. - // - // In this rate-based rule, you also - // define a rate limit. For this example, the rate limit is 1,000. Requests that - // meet both of the conditions in the statements are counted. If the count exceeds - // 1,000 requests per five minutes, the rule action triggers. Requests that do not - // meet both conditions are not counted towards the rate limit and are not affected - // by this rule. You cannot nest a RateBasedStatement, for example for use inside a - // NotStatement or OrStatement. It can only be referenced as a top-level statement - // within a rule. + // requests. WAF tracks and manages web requests separately for each instance of a + // rate-based rule that you use. For example, if you provide the same rate-based + // rule settings in two web ACLs, each of the two rule statements represents a + // separate instance of the rate-based rule and gets its own tracking and + // management by WAF. If you define a rate-based rule inside a rule group, and then + // use that rule group in multiple places, each use creates a separate instance of + // the rate-based rule that gets its own tracking and management by WAF. When the + // rule action triggers, WAF blocks additional requests from the IP address until + // the request rate falls below the limit. You can optionally nest another + // statement inside the rate-based statement, to narrow the scope of the rule so + // that it only counts requests that match the nested statement. 
For example, based + // on recent requests that you have seen from an attacker, you might create a + // rate-based rule with a nested AND rule statement that contains the following + // nested statements: + // + // * An IP match statement with an IP set that specified the + // address 192.0.2.44. + // + // * A string match statement that searches in the User-Agent + // header for the string BadBot. + // + // In this rate-based rule, you also define a rate + // limit. For this example, the rate limit is 1,000. Requests that meet both of the + // conditions in the statements are counted. If the count exceeds 1,000 requests + // per five minutes, the rule action triggers. Requests that do not meet both + // conditions are not counted towards the rate limit and are not affected by this + // rule. You cannot nest a RateBasedStatement inside another statement, for example + // inside a NotStatement or OrStatement. You can define a RateBasedStatement inside + // a web ACL and inside a rule group. RateBasedStatement *RateBasedStatement // A rule statement used to search web request components for matches with regular @@ -1940,8 +1967,8 @@ type Statement struct { // A rule statement used to run the rules that are defined in a RuleGroup. To use // this, create a rule group with your rules, then provide the ARN of the rule // group in this statement. You cannot nest a RuleGroupReferenceStatement, for - // example for use inside a NotStatement or OrStatement. It can only be referenced - // as a top-level statement within a rule. + // example for use inside a NotStatement or OrStatement. You can only use a rule + // group reference statement at the top level inside a web ACL. RuleGroupReferenceStatement *RuleGroupReferenceStatement // A rule statement that compares a number of bytes against the size of a request @@ -2177,18 +2204,18 @@ type TimeWindow struct { // The path component of the URI of a web request. This is the part of a web // request that identifies a resource. For example, /images/daily-ad.jpg. This is // used only to indicate the web request component for WAF to inspect, in the -// FieldToMatch specification. +// FieldToMatch specification. JSON specification: "UriPath": {} type UriPath struct { noSmithyDocumentSerde } // A version of the named managed rule group, that the rule group's vendor // publishes for use by customers. This is intended for use only by vendors of -// managed rule sets. Vendors are Amazon Web Services and Marketplace sellers. -// Vendors, you can use the managed rule set APIs to provide controlled rollout of -// your versioned managed rule group offerings for your customers. The APIs are -// ListManagedRuleSets, GetManagedRuleSet, PutManagedRuleSetVersions, and -// UpdateManagedRuleSetVersionExpiryDate. +// managed rule sets. Vendors are Amazon Web Services and Amazon Web Services +// Marketplace sellers. Vendors, you can use the managed rule set APIs to provide +// controlled rollout of your versioned managed rule group offerings for your +// customers. The APIs are ListManagedRuleSets, GetManagedRuleSet, +// PutManagedRuleSetVersions, and UpdateManagedRuleSetVersionExpiryDate. type VersionToPublish struct { // The Amazon Resource Name (ARN) of the vendor's rule group that's used in the